entry_point (stringlengths 1–65) | original_triton_code (stringlengths 4.5k–619k) | python_code (stringlengths 208–60.9k) | triton_code (stringlengths 1.15k–275k) | repo_name (stringlengths 7–115) | module_name (stringlengths 1–65) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (sequencelengths 1–6) | stars (int64, 0–19.8k) | sha (stringlengths 40–40) | repo_link (stringlengths 72–180) | pytorch_code (stringlengths 200–4.05k) |
---|---|---|---|---|---|---|---|---|---|---|---|---
DuRB_p | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/vo/cvoamjxqo3p3brgotscd3omngjkjdnhu2abijzcog3fhhkajblew.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
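    # Reflection-pad indexing: for ReflectionPad2d(1) on a 4x4 plane, the
    # offset 15 - |3 - |x0-1|| - 4*|3 - |x1-1|| + 16*x2 folds every padded
    # (x0, x1) back into the interior of channel plane x2.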
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/l2/cl25hve4tjj2oucqzrnmndqgpd6in5oyzk52s73avq2kjincyij2.py
# Topologically Sorted Source Nodes: [out_1, x, out_2], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_1 => convolution
# out_2 => _unsafe_index_2, _unsafe_index_3
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_convolution_reflection_pad2d_relu_1 = async_compile.triton('triton_poi_fused_convolution_reflection_pad2d_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_reflection_pad2d_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x4 = (xindex // 36)
x2 = (xindex // 36) % 32
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
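    # Fused epilogue: add the per-channel bias (in_ptr1) to the padded conv
    # output, then ReLU via max(0, x).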
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x5), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/uq/cuqb3bw6jfbmiqe4iqumf4rndurjwyqwtoczmckklamlf6ma5xoc.py
# Topologically Sorted Source Nodes: [out_3, x_1, x_2, out_4], Original ATen: [aten.convolution, aten.add, aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_3 => convolution_1
# out_4 => _unsafe_index_4, _unsafe_index_5
# x_1 => add
# x_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_5 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_4, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_add_convolution_reflection_pad2d_relu_2 = async_compile.triton('triton_poi_fused_add_convolution_reflection_pad2d_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_reflection_pad2d_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_reflection_pad2d_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x4 = (xindex // 36)
x2 = (xindex // 36) % 32
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x4)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x5), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2v/c2vbclfjx7gvstfqequubloqm3heyyndgpki4oy62nqplb7w5n35.py
# Topologically Sorted Source Nodes: [out_5, x_3, x_4], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_5 => convolution_2
# x_3 => add_1
# x_4 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_5, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_2, %primals_8), kwargs = {})
# %relu_2 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), None)
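    # relu(conv + bias + residual) is written back in place; tmp8 keeps the
    # <= 0 mask that autograd's threshold_backward consumes.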
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr0 + (x3), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2l/c2lnkzpovreaysdugdd2zmqp3i7xnr4in6re27x6xljispz6ysro.py
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_6 => iota_6
# Graph fragment:
# %iota_6 : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
triton_poi_fused_reflection_pad2d_4 = async_compile.triton('triton_poi_fused_reflection_pad2d_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_4(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
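    # Materializes torch.arange(4): the (identity) reflection indices that the
    # wrapper returns so they are saved for the backward pass.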
tmp0 = x0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/fu/cfunq6tp4ce7tfiahrebjklcitgvljfxowid7xxzmapcuo5uhfra.py
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_6 => _unsafe_index_6, _unsafe_index_7
# Graph fragment:
# %_unsafe_index_6 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_2, [None, None, %sub_13, None]), kwargs = {})
# %_unsafe_index_7 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_6, [None, None, None, %sub_13]), kwargs = {})
triton_poi_fused_reflection_pad2d_5 = async_compile.triton('triton_poi_fused_reflection_pad2d_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
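    # k2_size=1 means zero reflection padding, so the abs() folds reduce to
    # the identity 15 - (3 - x0) - 4*(3 - x1) + 16*x2 == x3: a plain copy.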
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), None)
tl.store(out_ptr0 + (x3), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/a7/ca7auwkj4fyh4ovcrbwfuerpjlhd7mu7hkorpkwl7hkjck4hsjqn.py
# Topologically Sorted Source Nodes: [out_3, x_1, x_2, out_7, x_5, x_6], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_3 => convolution_1
# out_7 => convolution_3
# x_1 => add
# x_2 => relu_1
# x_5 => add_2
# x_6 => relu_3
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_7, %primals_9, %primals_10, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_3, %primals_1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_2,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
# %le_22 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), None)
tmp9 = tl.load(in_ptr2 + (x3), None)
tmp10 = tl.load(in_ptr3 + (x1), None, eviction_policy='evict_last')
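    # Two fused streams: down_conv + bias + x_r -> relu_3 (stored in place)
    # plus its <= 0 mask, and a recompute of conv2 + bias + x_r whose relu_1
    # mask is also kept for threshold_backward.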
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tmp11 = tmp9 + tmp10
tmp12 = tmp11 + tmp3
tmp13 = triton_helpers.maximum(tmp5, tmp12)
tmp14 = tmp13 <= tmp7
tl.store(in_out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr0 + (x3), tmp8, None)
tl.store(out_ptr1 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ib/cibfc6qhgolpnfgffu4u5dxagpmtybo755xztk2ycggi63ga57lt.py
# Topologically Sorted Source Nodes: [out_1, x], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_41 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_2, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (4, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_9, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_10, (32, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 4608, grid=grid(4608), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 4, 4), (512, 16, 4, 1))
buf2 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1, x, out_2], Original ATen: [aten.convolution, aten.relu, aten.reflection_pad2d]
triton_poi_fused_convolution_reflection_pad2d_relu_1.run(buf1, primals_3, buf2, 4608, grid=grid(4608), stream=stream0)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
buf4 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_3, x_1, x_2, out_4], Original ATen: [aten.convolution, aten.add, aten.relu, aten.reflection_pad2d]
triton_poi_fused_add_convolution_reflection_pad2d_relu_2.run(buf3, primals_5, primals_1, buf4, 4608, grid=grid(4608), stream=stream0)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 4, 4), (512, 16, 4, 1))
buf6 = buf5; del buf5 # reuse
buf12 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_5, x_3, x_4], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_threshold_backward_3.run(buf6, primals_7, primals_8, buf12, 2048, grid=grid(2048), stream=stream0)
del primals_7
del primals_8
buf7 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_4.run(buf7, 4, grid=grid(4), stream=stream0)
buf8 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_5.run(buf6, buf8, 2048, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 4, 4), (512, 16, 4, 1))
buf10 = buf9; del buf9 # reuse
buf11 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf13 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3, x_1, x_2, out_7, x_5, x_6], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_threshold_backward_6.run(buf10, primals_10, primals_1, buf3, primals_5, buf11, buf13, 2048, grid=grid(2048), stream=stream0)
del buf3
del primals_1
del primals_10
del primals_5
buf14 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1, x], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_7.run(buf1, primals_3, buf14, 2048, grid=grid(2048), stream=stream0)
del buf1
del primals_3
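    # buf10 is the block's x output and buf6 its res output; the rest are
    # activations and ReLU masks saved for the backward pass.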
return (buf10, buf6, primals_2, primals_4, primals_6, primals_9, buf0, buf2, buf4, buf7, buf8, buf11, buf12, buf13, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.serialization
import torch
import torch.nn as nn
import torch.utils.data
class ConvLayer(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size, stride, dilation=1):
super(ConvLayer, self).__init__()
self.dilation = dilation
if dilation == 1:
reflect_padding = int(np.floor(kernel_size / 2))
self.reflection_pad = nn.ReflectionPad2d(reflect_padding)
self.conv2d = nn.Conv2d(in_dim, out_dim, kernel_size, stride,
dilation=dilation)
else:
self.conv2d = nn.Conv2d(in_dim, out_dim, kernel_size, stride,
dilation=dilation, padding=dilation)
def forward(self, x):
if self.dilation == 1:
out = self.reflection_pad(x)
out = self.conv2d(out)
else:
out = self.conv2d(x)
return out
class DuRB_p(nn.Module):
def __init__(self, in_dim=32, out_dim=32, res_dim=32, k1_size=3,
k2_size=1, dilation=1, norm_type='batch_norm', with_relu=True):
super(DuRB_p, self).__init__()
self.conv1 = ConvLayer(in_dim, in_dim, 3, 1)
self.conv2 = ConvLayer(in_dim, in_dim, 3, 1)
self.up_conv = ConvLayer(in_dim, res_dim, kernel_size=k1_size,
stride=1, dilation=dilation)
self.down_conv = ConvLayer(res_dim, out_dim, kernel_size=k2_size,
stride=1)
self.with_relu = with_relu
self.relu = nn.ReLU()
def forward(self, x, res):
x_r = x
x = self.relu(self.conv1(x))
x = self.conv2(x)
x += x_r
x = self.relu(x)
x = self.up_conv(x)
x += res
x = self.relu(x)
res = x
x = self.down_conv(x)
x += x_r
if self.with_relu:
x = self.relu(x)
else:
pass
return x, res
def get_inputs():
return [torch.rand([4, 32, 4, 4]), torch.rand([4, 32, 4, 4])]
def get_init_inputs():
return [[], {}]
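# Editor sketch (not part of the dataset row): a minimal CPU smoke test using
# only the definitions above.
if __name__ == "__main__":
    block = DuRB_p()
    x, res = block(*get_inputs())
    print(x.shape, res.shape)  # both torch.Size([4, 32, 4, 4])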
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.serialization
import torch
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x4 = xindex // 36
x2 = xindex // 36 % 32
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x5, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_reflection_pad2d_relu_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x4 = xindex // 36
x2 = xindex // 36 % 32
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x5, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_3(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tl.store(in_out_ptr0 + x3, tmp6, None)
tl.store(out_ptr0 + x3, tmp8, None)
@triton.jit
def triton_poi_fused_reflection_pad2d_4(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
        tl_math.abs(-3 + x1) + 16 * x2), None)
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_6(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp9 = tl.load(in_ptr2 + x3, None)
tmp10 = tl.load(in_ptr3 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = 0.0
tmp8 = tmp6 <= tmp7
tmp11 = tmp9 + tmp10
tmp12 = tmp11 + tmp3
tmp13 = triton_helpers.maximum(tmp5, tmp12)
tmp14 = tmp13 <= tmp7
tl.store(in_out_ptr0 + x3, tmp6, None)
tl.store(out_ptr0 + x3, tmp8, None)
tl.store(out_ptr1 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_7(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_2, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (4, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_9, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_10, (32,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(4608)](primals_1, buf0,
4608, XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 4, 4), (512, 16, 4, 1))
        buf2 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1),
            torch.float32)
triton_poi_fused_convolution_reflection_pad2d_relu_1[grid(4608)](buf1,
primals_3, buf2, 4608, XBLOCK=256, num_warps=4, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
        buf4 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1),
            torch.float32)
triton_poi_fused_add_convolution_reflection_pad2d_relu_2[grid(4608)](
buf3, primals_5, primals_1, buf4, 4608, XBLOCK=256, num_warps=4,
num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 4, 4), (512, 16, 4, 1))
buf6 = buf5
del buf5
buf12 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
        triton_poi_fused_add_convolution_relu_threshold_backward_3[grid(2048)](
            buf6, primals_7, primals_8, buf12, 2048, XBLOCK=256,
            num_warps=4, num_stages=1)
del primals_7
del primals_8
buf7 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_reflection_pad2d_4[grid(4)](buf7, 4, XBLOCK=4,
num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1),
            torch.float32)
triton_poi_fused_reflection_pad2d_5[grid(2048)](buf6, buf8, 2048,
XBLOCK=256, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf8, primals_9, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 4, 4), (512, 16, 4, 1))
buf10 = buf9
del buf9
buf11 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf13 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_6[grid(2048)](
buf10, primals_10, primals_1, buf3, primals_5, buf11, buf13,
2048, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_1
del primals_10
del primals_5
buf14 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_7[grid(2048)](
            buf1, primals_3, buf14, 2048, XBLOCK=256, num_warps=4,
            num_stages=1)
del buf1
del primals_3
return (buf10, buf6, primals_2, primals_4, primals_6, primals_9, buf0,
buf2, buf4, buf7, buf8, buf11, buf12, buf13, buf14)
class ConvLayer(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size, stride, dilation=1):
super(ConvLayer, self).__init__()
self.dilation = dilation
if dilation == 1:
reflect_padding = int(np.floor(kernel_size / 2))
self.reflection_pad = nn.ReflectionPad2d(reflect_padding)
self.conv2d = nn.Conv2d(in_dim, out_dim, kernel_size, stride,
dilation=dilation)
else:
self.conv2d = nn.Conv2d(in_dim, out_dim, kernel_size, stride,
dilation=dilation, padding=dilation)
def forward(self, x):
if self.dilation == 1:
out = self.reflection_pad(x)
out = self.conv2d(out)
else:
out = self.conv2d(x)
return out
class DuRB_pNew(nn.Module):
def __init__(self, in_dim=32, out_dim=32, res_dim=32, k1_size=3,
k2_size=1, dilation=1, norm_type='batch_norm', with_relu=True):
super(DuRB_pNew, self).__init__()
self.conv1 = ConvLayer(in_dim, in_dim, 3, 1)
self.conv2 = ConvLayer(in_dim, in_dim, 3, 1)
self.up_conv = ConvLayer(in_dim, res_dim, kernel_size=k1_size,
stride=1, dilation=dilation)
self.down_conv = ConvLayer(res_dim, out_dim, kernel_size=k2_size,
stride=1)
self.with_relu = with_relu
self.relu = nn.ReLU()
def forward(self, input_0, input_1):
primals_2 = self.conv1.conv2d.weight
primals_3 = self.conv1.conv2d.bias
primals_4 = self.conv2.conv2d.weight
primals_5 = self.conv2.conv2d.bias
primals_6 = self.up_conv.conv2d.weight
primals_7 = self.up_conv.conv2d.bias
primals_9 = self.down_conv.conv2d.weight
primals_10 = self.down_conv.conv2d.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0], output[1]
| vis-opt-group/GTANet | DuRB_p | false | 4,500 | [
"MIT"
] | 0 | 269ff4418ee5f0267987e1fa4c69bda13e5cb00d | https://github.com/vis-opt-group/GTANet/tree/269ff4418ee5f0267987e1fa4c69bda13e5cb00d | import torch
import numpy as np
import torch.serialization
import torch
import torch.nn as nn
import torch.utils.data
class ConvLayer(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size, stride, dilation=1):
super().__init__()
self.dilation = dilation
if dilation == 1:
reflect_padding = int(np.floor(kernel_size / 2))
self.reflection_pad = nn.ReflectionPad2d(reflect_padding)
self.conv2d = nn.Conv2d(in_dim, out_dim, kernel_size, stride,
dilation=dilation)
else:
self.conv2d = nn.Conv2d(in_dim, out_dim, kernel_size, stride,
dilation=dilation, padding=dilation)
def forward(self, x):
if self.dilation == 1:
out = self.reflection_pad(x)
out = self.conv2d(out)
else:
out = self.conv2d(x)
return out
class Model(nn.Module):
def __init__(self, in_dim=32, out_dim=32, res_dim=32, k1_size=3,
k2_size=1, dilation=1, norm_type='batch_norm', with_relu=True):
super().__init__()
self.conv1 = ConvLayer(in_dim, in_dim, 3, 1)
self.conv2 = ConvLayer(in_dim, in_dim, 3, 1)
self.up_conv = ConvLayer(in_dim, res_dim, kernel_size=k1_size,
stride=1, dilation=dilation)
self.down_conv = ConvLayer(res_dim, out_dim, kernel_size=k2_size,
stride=1)
self.with_relu = with_relu
self.relu = nn.ReLU()
def forward(self, x, res):
x_r = x
x = self.relu(self.conv1(x))
x = self.conv2(x)
x += x_r
x = self.relu(x)
x = self.up_conv(x)
x += res
x = self.relu(x)
res = x
x = self.down_conv(x)
x += x_r
if self.with_relu:
x = self.relu(x)
else:
pass
return x, res
def get_inputs():
return [torch.rand([4, 32, 4, 4]), torch.rand([4, 32, 4, 4])]
def get_init_inputs():
return []
|
Accuracy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/bt/cbt3ifwhfqxk6gfux2vr2qvcztv4fen56aagzwb2jnxtnysnfw37.py
# Topologically Sorted Source Nodes: [pred, lab, eq, correct, float_1, truediv, acc], Original ATen: [aten.ge, aten.eq, aten.sum, aten._to_copy, aten.div, aten.mul]
# Source node to ATen node mapping:
# acc => mul
# correct => sum_1
# eq => eq
# float_1 => convert_element_type
# lab => ge_1
# pred => ge
# truediv => div
# Graph fragment:
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%arg0_1, 0.5), kwargs = {})
# %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%arg1_1, 0.5), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%ge, %ge_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%eq,), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_1, torch.float32), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%convert_element_type, 256), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 100.0), kwargs = {})
triton_per_fused__to_copy_div_eq_ge_mul_sum_0 = async_compile.triton('triton_per_fused__to_copy_div_eq_ge_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_div_eq_ge_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_div_eq_ge_mul_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 0.5
tmp2 = tmp0 >= tmp1
tmp4 = tmp3 >= tmp1
tmp5 = tmp2 == tmp4
tmp6 = tmp5.to(tl.int64)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp9.to(tl.float32)
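    # 0.00390625 == 1/256 (lab.numel()), so tmp14 = match_count / total * 100.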
tmp11 = 0.00390625
tmp12 = tmp10 * tmp11
tmp13 = 100.0
tmp14 = tmp12 * tmp13
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [pred, lab, eq, correct, float_1, truediv, acc], Original ATen: [aten.ge, aten.eq, aten.sum, aten._to_copy, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_div_eq_ge_mul_sum_0.run(arg0_1, arg1_1, buf1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch import Tensor
class Accuracy(Module):
"""
Class for calculating the accuracy for a given prediction and the labels
for comparison.
    Expects the inputs to be in the range 0 to 1 and applies a crossing
    threshold at 0.5; the labels are thresholded the same way.
"""
def forward(self, pred: 'Tensor', lab: 'Tensor') ->Tensor:
"""
        :param pred: the model's prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
return Accuracy.calculate(pred, lab)
@staticmethod
def calculate(pred: 'Tensor', lab: 'Tensor'):
"""
        :param pred: the model's prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
pred = pred >= 0.5
lab = lab >= 0.5
correct = (pred == lab).sum()
total = lab.numel()
acc = correct.float() / total * 100.0
return acc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
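# Editor sketch (not part of the dataset row): identical inputs match at every
# element, so the accuracy is exactly 100.
if __name__ == "__main__":
    t = torch.rand(4, 4, 4, 4)
    print(Accuracy()(t, t))  # tensor(100.)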
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
from torch import Tensor
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_div_eq_ge_mul_sum_0(in_ptr0, in_ptr1,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.5
tmp2 = tmp0 >= tmp1
tmp4 = tmp3 >= tmp1
tmp5 = tmp2 == tmp4
tmp6 = tmp5.to(tl.int64)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp9.to(tl.float32)
tmp11 = 0.00390625
tmp12 = tmp10 * tmp11
tmp13 = 100.0
tmp14 = tmp12 * tmp13
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_div_eq_ge_mul_sum_0[grid(1)](arg0_1,
arg1_1, buf1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class AccuracyNew(Module):
"""
Class for calculating the accuracy for a given prediction and the labels
for comparison.
    Expects the inputs to be in the range 0 to 1 and applies a crossing
    threshold at 0.5; the labels are thresholded the same way.
"""
@staticmethod
def calculate(pred: 'Tensor', lab: 'Tensor'):
"""
        :param pred: the model's prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
pred = pred >= 0.5
lab = lab >= 0.5
correct = (pred == lab).sum()
total = lab.numel()
acc = correct.float() / total * 100.0
return acc
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| vixadd/sparseml | Accuracy | false | 4,501 | [
"Apache-2.0"
] | 0 | e2dcb66bad713542158dfe54cba113a0cc02ed39 | https://github.com/vixadd/sparseml/tree/e2dcb66bad713542158dfe54cba113a0cc02ed39 | from torch.nn import Module
import torch
from torch import Tensor
class Model(Module):
"""
Class for calculating the accuracy for a given prediction and the labels
for comparison.
    Expects the inputs to be in the range 0 to 1 and applies a crossing
    threshold at 0.5; the labels are thresholded the same way.
"""
def forward(self, pred: 'Tensor', lab: 'Tensor') ->Tensor:
"""
        :param pred: the model's prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
return Accuracy.calculate(pred, lab)
@staticmethod
def calculate(pred: 'Tensor', lab: 'Tensor'):
"""
        :param pred: the model's prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
pred = pred >= 0.5
lab = lab >= 0.5
correct = (pred == lab).sum()
total = lab.numel()
acc = correct.float() / total * 100.0
return acc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
PureUpsampling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/yv/cyvap7j3rcqrtuv3wrc3n4rlhc4wagsezo7s4lrfe53ili5imvei.py
# Topologically Sorted Source Nodes: [xout], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
# xout => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4
# Graph fragment:
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_2, 0.42857142857142855), kwargs = {})
# %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_1, 0.0), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = 1.0
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp16 + tmp26
tmp28 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (tmp18 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
tmp30 = tmp29 - tmp28
tmp31 = tmp30 * tmp25
tmp32 = tmp28 + tmp31
tmp33 = tmp27 - tmp32
tmp34 = tmp6.to(tl.float32)
tmp35 = tmp5 - tmp34
tmp36 = triton_helpers.maximum(tmp35, tmp4)
tmp37 = triton_helpers.minimum(tmp36, tmp24)
tmp38 = tmp33 * tmp37
tmp39 = tmp32 + tmp38
tl.store(in_out_ptr0 + (x4), tmp39, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [xout], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf1, arg0_1, 1024, grid=grid(1024), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class PureUpsampling(nn.Module):
def __init__(self, scale=2, mode='bilinear'):
super(PureUpsampling, self).__init__()
assert isinstance(scale, int)
self.scale = scale
self.mode = mode
def forward(self, x):
h, w = x.size(2) * self.scale, x.size(3) * self.scale
if self.mode == 'nearest':
xout = F.interpolate(input=x, size=(h, w), mode=self.mode)
else:
xout = F.interpolate(input=x, size=(h, w), mode=self.mode,
align_corners=True)
return xout
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
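# A minimal, hedged usage sketch (added for illustration; it is not part of
# the original module). Since forward simply delegates to F.interpolate, the
# output must match a direct call with the same size/mode/align_corners.
if __name__ == "__main__":
    up = PureUpsampling(scale=2, mode='bilinear')
    x = torch.rand(4, 4, 4, 4)
    ref = F.interpolate(x, size=(8, 8), mode='bilinear', align_corners=True)
    assert torch.allclose(up(x), ref)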
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
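    # 0.42857142857142855 == 3 / 7 == (in_size - 1) / (out_size - 1) for the
    # 4 -> 8 bilinear resize with align_corners=True, baked in at compile time.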
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = 1.0
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp16 + tmp26
tmp28 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp30 = tmp29 - tmp28
tmp31 = tmp30 * tmp25
tmp32 = tmp28 + tmp31
tmp33 = tmp27 - tmp32
tmp34 = tmp6.to(tl.float32)
tmp35 = tmp5 - tmp34
tmp36 = triton_helpers.maximum(tmp35, tmp4)
tmp37 = triton_helpers.minimum(tmp36, tmp24)
tmp38 = tmp33 * tmp37
tmp39 = tmp32 + tmp38
tl.store(in_out_ptr0 + x4, tmp39, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(1024)](buf1, arg0_1, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf1,
class PureUpsamplingNew(nn.Module):
def __init__(self, scale=2, mode='bilinear'):
super(PureUpsamplingNew, self).__init__()
assert isinstance(scale, int)
self.scale = scale
self.mode = mode
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| vlbthambawita/polyp-inpainting | PureUpsampling | false | 4,502 | [
"MIT"
] | 0 | f1d754f8ffb3f6d991206b2a661933ff32de0d7a | https://github.com/vlbthambawita/polyp-inpainting/tree/f1d754f8ffb3f6d991206b2a661933ff32de0d7a | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, scale=2, mode='bilinear'):
super().__init__()
assert isinstance(scale, int)
self.scale = scale
self.mode = mode
def forward(self, x):
h, w = x.size(2) * self.scale, x.size(3) * self.scale
if self.mode == 'nearest':
xout = F.interpolate(input=x, size=(h, w), mode=self.mode)
else:
xout = F.interpolate(input=x, size=(h, w), mode=self.mode,
align_corners=True)
return xout
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
RandomCrop | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/uk/cukf4655cuh7fnvptcwcf6drfinabsttxvrtod7lygls4ko5acrr.py
# Topologically Sorted Source Nodes: [patches_3], Original ATen: [aten.index]
# Source node to ATen node mapping:
# patches_3 => index_1
# Graph fragment:
# %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%unfold_1, [%iota_default, None, %randint_1]), kwargs = {})
triton_poi_fused_index_0 = async_compile.triton('triton_poi_fused_index_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 1, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 1)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 1")
tmp7 = tmp6 + tmp1
tmp8 = tmp6 < 0
tmp9 = tl.where(tmp8, tmp7, tmp6)
tl.device_assert(((0 <= tmp9) & (tmp9 < 1)) | ~(xmask), "index out of bounds: 0 <= tmp9 < 1")
tmp11 = tl.load(in_ptr2 + (tmp4 + x2 + (4*tmp9)), xmask)
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [idx], Original ATen: [aten.randint]
buf0 = torch.ops.aten.randint.low(0, 1, [4], device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
# Topologically Sorted Source Nodes: [idx_1], Original ATen: [aten.randint]
buf2 = torch.ops.aten.randint.low(0, 1, [4], device=device(type='cuda', index=0), pin_memory=False)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [patches_3], Original ATen: [aten.index]
stream0 = get_raw_stream(0)
triton_poi_fused_index_0.run(buf3, buf1, arg0_1, buf4, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf1
del buf3
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
def choose_rand_patches(x, patch_sz, dim):
assert dim == 2 or dim == 3
batch_sz = x.shape[0]
patches = x.unfold(dim, patch_sz, 1)
n_patches = patches.shape[2]
idx = torch.randint(0, n_patches, (batch_sz,))
if dim == 2:
patches = patches[torch.arange(batch_sz), :, idx, :]
elif dim == 3:
patches = patches[torch.arange(batch_sz), :, :, idx]
return patches
class RandomCrop(nn.Module):
def __init__(self, crop_sz):
super(RandomCrop, self).__init__()
self.crop_sz = crop_sz
def forward(self, x):
img_sz = x.shape[-1]
assert img_sz >= self.crop_sz, f'img_sz {img_sz} is too small for crop_sz {self.crop_sz}'
x = choose_rand_patches(x, self.crop_sz, 2)
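        # dim=2 is intentional for the second call too: the advanced indexing
        # inside choose_rand_patches removes the unfolded dimension, so the
        # original width ends up at dim 2 on the second pass.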
x = choose_rand_patches(x, self.crop_sz, 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'crop_sz': 4}]
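# A minimal, hedged usage sketch (added for illustration; it is not part of
# the original module): cropping a 5x5 image down to 3x3 exercises both
# unfold passes with a genuinely random patch choice.
if __name__ == "__main__":
    crop = RandomCrop(crop_sz=3)
    out = crop(torch.rand(4, 4, 5, 5))
    assert out.shape == (4, 4, 3, 3)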
| import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 1, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 1) | ~xmask,
'index out of bounds: 0 <= tmp4 < 1')
tmp7 = tmp6 + tmp1
tmp8 = tmp6 < 0
tmp9 = tl.where(tmp8, tmp7, tmp6)
tl.device_assert((0 <= tmp9) & (tmp9 < 1) | ~xmask,
'index out of bounds: 0 <= tmp9 < 1')
tmp11 = tl.load(in_ptr2 + (tmp4 + x2 + 4 * tmp9), xmask)
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.randint.low(0, 1, [4], device=device(type=
'cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = torch.ops.aten.randint.low(0, 1, [4], device=device(type=
'cuda', index=0), pin_memory=False)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_0[grid(256)](buf3, buf1, arg0_1, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del buf1
del buf3
return buf4,
def choose_rand_patches(x, patch_sz, dim):
assert dim == 2 or dim == 3
batch_sz = x.shape[0]
patches = x.unfold(dim, patch_sz, 1)
n_patches = patches.shape[2]
idx = torch.randint(0, n_patches, (batch_sz,))
if dim == 2:
patches = patches[torch.arange(batch_sz), :, idx, :]
elif dim == 3:
patches = patches[torch.arange(batch_sz), :, :, idx]
return patches
class RandomCropNew(nn.Module):
def __init__(self, crop_sz):
super(RandomCropNew, self).__init__()
self.crop_sz = crop_sz
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| vitskvara/shape-guided-anomaly-detection | RandomCrop | false | 4,503 | [
"MIT"
] | 0 | 6685b2e0b97968a6d0f478d2920486da107b277f | https://github.com/vitskvara/shape-guided-anomaly-detection/tree/6685b2e0b97968a6d0f478d2920486da107b277f | import torch
from torch import nn
def choose_rand_patches(x, patch_sz, dim):
assert dim == 2 or dim == 3
batch_sz = x.shape[0]
patches = x.unfold(dim, patch_sz, 1)
n_patches = patches.shape[2]
idx = torch.randint(0, n_patches, (batch_sz,))
if dim == 2:
patches = patches[torch.arange(batch_sz), :, idx, :]
elif dim == 3:
patches = patches[torch.arange(batch_sz), :, :, idx]
return patches
class Model(nn.Module):
def __init__(self, crop_sz):
super().__init__()
self.crop_sz = crop_sz
def forward(self, x):
img_sz = x.shape[-1]
assert img_sz >= self.crop_sz, f'img_sz {img_sz} is too small for crop_sz {self.crop_sz}'
x = choose_rand_patches(x, self.crop_sz, 2)
x = choose_rand_patches(x, self.crop_sz, 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
TVLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/o6/co6yd3unt6464cqtdhaf72ztdbe2oj7bhrdg57pwfpdqh44anqbx.py
# Topologically Sorted Source Nodes: [sub, h_tv, sum_1, sub_1, w_tv, sum_2, loss], Original ATen: [aten.sub, aten.abs, aten.sum, aten.add]
# Source node to ATen node mapping:
# h_tv => abs_1
# loss => add
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# sum_2 => sum_2
# w_tv => abs_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_12, %slice_16), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_2,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_abs_add_sub_sum_0 = async_compile.triton('triton_per_fused_abs_add_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 12
r1 = (rindex // 12)
r2 = rindex % 3
r3 = (rindex // 3)
tmp0 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + (4*r3)), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + (4*r3)), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, h_tv, sum_1, sub_1, w_tv, sum_2, loss], Original ATen: [aten.sub, aten.abs, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_sub_sum_0.run(buf2, arg0_1, 1, 192, grid=grid(1), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class TVLoss(nn.Module):
def __init__(self):
super(TVLoss, self).__init__()
def forward(self, x):
h_x, w_x = x.size()[2:]
h_tv = torch.abs(x[:, :, 1:, :] - x[:, :, :h_x - 1, :])
w_tv = torch.abs(x[:, :, :, 1:] - x[:, :, :, :w_x - 1])
loss = torch.sum(h_tv) + torch.sum(w_tv)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
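# A minimal, hedged worked example (added for illustration; it is not part
# of the original module): anisotropic TV loss is zero for a constant image,
# and for a horizontal ramp only the width-direction term contributes.
if __name__ == "__main__":
    tv = TVLoss()
    assert tv(torch.ones(1, 1, 4, 4)).item() == 0.0
    ramp = torch.arange(4.0).repeat(4, 1).view(1, 1, 4, 4)
    assert tv(ramp).item() == 12.0  # 4 rows * 3 unit steps each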
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
rnumel = 192
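    # 192 = 4 * 4 * 3 * 4: the element count of each finite-difference tensor
    # (h_tv and w_tv have the same size for the square 4x4 input).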
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_sub_sum_0[grid(1)](buf2, arg0_1, 1, 192,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TVLossNew(nn.Module):
def __init__(self):
super(TVLossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| vlbthambawita/polyp-inpainting | TVLoss | false | 4,504 | [
"MIT"
] | 0 | f1d754f8ffb3f6d991206b2a661933ff32de0d7a | https://github.com/vlbthambawita/polyp-inpainting/tree/f1d754f8ffb3f6d991206b2a661933ff32de0d7a | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
h_x, w_x = x.size()[2:]
h_tv = torch.abs(x[:, :, 1:, :] - x[:, :, :h_x - 1, :])
w_tv = torch.abs(x[:, :, :, 1:] - x[:, :, :, :w_x - 1])
loss = torch.sum(h_tv) + torch.sum(w_tv)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
conv_head_pooling | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cls_token], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return (buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class conv_head_pooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, conv_type,
padding_mode='zeros', dilation=1):
super(conv_head_pooling, self).__init__()
if conv_type == 'depthwise':
_groups = in_feature
else:
_groups = 1
self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=3,
padding=dilation, dilation=dilation, stride=stride,
padding_mode=padding_mode, groups=_groups)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token):
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature': 4, 'out_feature': 4, 'stride': 1,
'conv_type': 4}]
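# A minimal, hedged usage sketch (added for illustration; it is not part of
# the original module): with stride 2 the 3x3 depthwise conv halves the
# spatial grid, while the linear layer re-projects the class token.
if __name__ == "__main__":
    pool = conv_head_pooling(4, 8, stride=2, conv_type='depthwise')
    x, tok = pool(torch.rand(2, 4, 8, 8), torch.rand(2, 1, 4))
    assert x.shape == (2, 8, 4, 4) and tok.shape == (2, 1, 8)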
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_4
del primals_5
return buf1, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_3, reinterpret_tensor(primals_6, (64, 4), (4,
1), 0)
class conv_head_poolingNew(nn.Module):
def __init__(self, in_feature, out_feature, stride, conv_type,
padding_mode='zeros', dilation=1):
super(conv_head_poolingNew, self).__init__()
if conv_type == 'depthwise':
_groups = in_feature
else:
_groups = 1
self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=3,
padding=dilation, dilation=dilation, stride=stride,
padding_mode=padding_mode, groups=_groups)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, input_0, input_1):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.fc.weight
primals_5 = self.fc.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
| tsubauaaa/d2go | conv_head_pooling | false | 4,505 | [
"Apache-2.0"
] | 0 | 9f746159ebf78ce79f644c405ca8695bc29d1075 | https://github.com/tsubauaaa/d2go/tree/9f746159ebf78ce79f644c405ca8695bc29d1075 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, in_feature, out_feature, stride, conv_type,
padding_mode='zeros', dilation=1):
super().__init__()
if conv_type == 'depthwise':
_groups = in_feature
else:
_groups = 1
self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=3,
padding=dilation, dilation=dilation, stride=stride,
padding_mode=padding_mode, groups=_groups)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token):
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature': 4, 'out_feature': 4, 'stride': 1,
'conv_type': 4}]
|
ShortcutLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6m/c6meipc3hj73stcu7rb5awarajqp6ejfbfufsae27ji2sot5kixg.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %select), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (256 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (5, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (5, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((5, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg1_1, arg0_1, buf0, 320, grid=grid(320), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((5, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((5, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ShortcutLayer(nn.Module):
def __init__(self, idx):
super(ShortcutLayer, self).__init__()
self.idx = idx
def forward(self, x, outputs):
return x + outputs[self.idx]
def get_inputs():
return [torch.rand([5, 4, 4, 4]), torch.rand([5, 4, 4, 4])]
def get_init_inputs():
return [[], {'idx': 4}]
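# A minimal, hedged usage sketch (added for illustration; it is not part of
# the original module): the layer adds the cached output at position `idx`
# to the current tensor, a YOLO-style shortcut connection.
if __name__ == "__main__":
    shortcut = ShortcutLayer(idx=0)
    x = torch.rand(2, 3, 4, 4)
    assert torch.equal(shortcut(x, [torch.ones(2, 3, 4, 4)]), x + 1.0)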
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
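    # 256 + x0 selects batch element 4 of a (5, 4, 4, 4) tensor: flat offset
    # 4 * 64 = 256. This implements the outputs[self.idx] lookup with idx == 4.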
tmp1 = tl.load(in_ptr1 + (256 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (5, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (5, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((5, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(320)](arg1_1, arg0_1, buf0, 320, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class ShortcutLayerNew(nn.Module):
def __init__(self, idx):
super(ShortcutLayerNew, self).__init__()
self.idx = idx
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| vrindaprabhu/solofy | ShortcutLayer | false | 4,506 | [
"MIT"
] | 0 | d5e26ff20d293c200485c70be6dcd6481afba396 | https://github.com/vrindaprabhu/solofy/tree/d5e26ff20d293c200485c70be6dcd6481afba396 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, idx):
super().__init__()
self.idx = idx
def forward(self, x, outputs):
return x + outputs[self.idx]
def get_inputs():
return [torch.rand([5, 4, 4, 4]), torch.rand([5, 4, 4, 4])]
def get_init_inputs():
return [4]
|
CustomGroupNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/c7/cc7uyyu3dbirvldumnsi4p575t2h7hsmbwn33hjkip5vecgf5mxu.py
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused_native_group_norm_0 = async_compile.triton('triton_per_fused_native_group_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[8, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 8
rnumel = 32
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (32*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 32, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
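    # 32 values per normalization group: 4 channels / 2 groups = 2 channels,
    # each with a 4x4 spatial map, so 2 * 16 = 32 elements share a mean/var.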
tmp17 = 32.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + (x0), tmp21, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
tl.store(out_ptr1 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6w/c6wumqqn26v3bkv5fl454fgbd24shogfigg3dtdmuj5lp6djujr5.py
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm => add_1, mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
triton_poi_fused_native_group_norm_1 = async_compile.triton('triton_poi_fused_native_group_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 16)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + ((x4 // 2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((x4 // 2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 32.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
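# Driver: kernel 0 reduces per-(batch, group) statistics -- mean (buf0), sum
# of squared deviations (buf1) and rstd (buf4) -- and kernel 1 then applies
# the normalization plus per-channel affine into buf3. The extra tensors in
# the return tuple are saved for the backward pass.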
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf1 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf4 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_0.run(primals_3, buf0, buf1, buf4, 8, 32, grid=grid(8), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
triton_poi_fused_native_group_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf3, 256, grid=grid(256), stream=stream0)
del buf1
del primals_1
del primals_2
return (buf3, primals_3, reinterpret_tensor(buf0, (4, 2, 1), (2, 1, 1), 0), reinterpret_tensor(buf4, (4, 2, 1), (2, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class CustomGroupNorm(torch.nn.Module):
"""
    Custom GroupNorm that adds n_groups=2 as a default parameter
"""
def __init__(self, n_features, n_groups=2):
"""
Parameters
----------
n_features : int
number of input features
n_groups : int
number of normalization groups
"""
super().__init__()
self.norm = torch.nn.GroupNorm(n_groups, n_features)
def forward(self, x):
"""
        Forward a batch through the network
Parameters
----------
x : :class:`torch.Tensor`
batch to forward
Returns
-------
:class:`torch.Tensor`
normalized results
"""
return self.norm(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4}]
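# Illustrative usage sketch (an assumption, not part of the captured record):
# the wrapper should agree with functional group norm over two groups.
#
# gn = CustomGroupNorm(n_features=4)
# x = torch.rand(4, 4, 4, 4)
# ref = torch.nn.functional.group_norm(x, 2, gn.norm.weight, gn.norm.bias)
# assert torch.allclose(gn(x), ref, atol=1e-6)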
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 8
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 32, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 32.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
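# The reduction above runs one program per (batch, group) pair (8 in total),
# each covering the group's 32 elements (2 channels x 16 spatial positions),
# and stores the mean (out_ptr0), the sum of squared deviations (out_ptr1)
# and rsqrt(var + 1e-05) (out_ptr2) for the fused native_group_norm.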
@triton.jit
def triton_poi_fused_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4 // 2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4 // 2, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 32.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
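# The elementwise pass above recomputes rstd from the stored sum of squared
# deviations (in_ptr2), centers each element on its group's mean (in_ptr1,
# both indexed by group via x4 // 2), and applies the per-channel affine
# weight/bias (in_ptr3/in_ptr4, indexed by channel x1).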
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf1 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
buf4 = empty_strided_cuda((4, 2, 1, 1), (2, 1, 8, 8), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_0[grid(8)](primals_3, buf0, buf1,
buf4, 8, 32, XBLOCK=8, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_group_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del primals_1
del primals_2
return buf3, primals_3, reinterpret_tensor(buf0, (4, 2, 1), (2, 1, 1), 0
), reinterpret_tensor(buf4, (4, 2, 1), (2, 1, 1), 0)
class CustomGroupNormNew(torch.nn.Module):
"""
    Custom GroupNorm that adds n_groups=2 as a default parameter
"""
def __init__(self, n_features, n_groups=2):
"""
Parameters
----------
n_features : int
number of input features
n_groups : int
number of normalization groups
"""
super().__init__()
self.norm = torch.nn.GroupNorm(n_groups, n_features)
def forward(self, input_0):
primals_1 = self.norm.weight
primals_2 = self.norm.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| vuamitom/shapenet | CustomGroupNorm | false | 4,507 | [
"BSD-2-Clause"
] | 0 | 9eb3dadc91801756cb3460707c37146c8176643e | https://github.com/vuamitom/shapenet/tree/9eb3dadc91801756cb3460707c37146c8176643e | import torch
class Model(torch.nn.Module):
"""
Custom Group Norm which adds n_groups=2 as default parameter
"""
def __init__(self, n_features, n_groups=2):
"""
Parameters
----------
n_features : int
number of input features
n_groups : int
number of normalization groups
"""
super().__init__()
self.norm = torch.nn.GroupNorm(n_groups, n_features)
def forward(self, x):
"""
        Forward a batch through the network
Parameters
----------
x : :class:`torch.Tensor`
batch to forward
Returns
-------
:class:`torch.Tensor`
normalized results
"""
return self.norm(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
PairwiseNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/5j/c5jtu26zvdussso5bqit4kzl5jq6xok4cot5eb5god3praom7dxd.py
# Topologically Sorted Source Nodes: [sub, norm], Original ATen: [aten.sub, aten.linalg_vector_norm]
# Source node to ATen node mapping:
# norm => abs_1, sum_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_1, %slice_2), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, [1]), kwargs = {})
triton_per_fused_linalg_vector_norm_sub_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[2, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_sub_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 2
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (128*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (64 + r1 + (128*x0)), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7s/c7stgdpn75kt2msjx4sz4rd4yqdaszkb2q5c7v44z7bokxr4hapw.py
# Topologically Sorted Source Nodes: [norm, norm_1, norm_2], Original ATen: [aten.linalg_vector_norm, aten.sum, aten.div]
# Source node to ATen node mapping:
# norm => pow_2
# norm_1 => sum_2
# norm_2 => div
# Graph fragment:
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 1.0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {})
triton_per_fused_div_linalg_vector_norm_sum_1 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 2],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((2, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [sub, norm], Original ATen: [aten.sub, aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_sub_0.run(arg0_1, buf0, 2, 64, grid=grid(2), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [norm, norm_1, norm_2], Original ATen: [aten.linalg_vector_norm, aten.sum, aten.div]
triton_per_fused_div_linalg_vector_norm_sum_1.run(buf2, buf0, 1, 2, grid=grid(1), stream=stream0)
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class PairwiseNorm(nn.Module):
def __init__(self, order=1, size_average=True):
super().__init__()
self.order = order
self.average = size_average
def forward(self, inp, target=None):
inp = inp.flatten(1)
assert len(inp) % 2 == 0
samples1, samples2 = inp[::2], inp[1::2]
norm = (samples1 - samples2).norm(p=self.order, dim=1).sum()
if self.average:
norm = norm / len(inp)
return norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
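# Illustrative equivalence sketch (an assumption, not part of the captured
# record): for the default order=1, the two fused kernels compute
#
# x = torch.rand(4, 4, 4, 4).flatten(1)  # 4 samples of length 64
# ref = (x[::2] - x[1::2]).abs().sum(dim=1).sum() / 4
#
# i.e. the L1 distance between consecutive sample pairs, averaged over all
# len(inp) = 4 samples rather than over the 2 pairs.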
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_sub_0(in_ptr0, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 2
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 128 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (64 + r1 + 128 * x0), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
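# One program per sample pair (xnumel = 2): samples 2*x0 and 2*x0 + 1 lie 64
# floats apart, and the kernel reduces |a - b| over the 64 flattened
# features into one per-pair L1 distance.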
@triton.jit
def triton_per_fused_div_linalg_vector_norm_sum_1(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None)
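# Sums the two per-pair distances and multiplies by 0.25 = 1 / len(inp),
# folding the size_average division into the kernel.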
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((2,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_sub_0[grid(2)](arg0_1, buf0, 2,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_div_linalg_vector_norm_sum_1[grid(1)](buf2, buf0,
1, 2, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
return buf2,
class PairwiseNormNew(nn.Module):
def __init__(self, order=1, size_average=True):
super().__init__()
self.order = order
self.average = size_average
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| vzinche/inferno | PairwiseNorm | false | 4,508 | [
"Apache-2.0"
] | 0 | 91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034 | https://github.com/vzinche/inferno/tree/91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, order=1, size_average=True):
super().__init__()
self.order = order
self.average = size_average
def forward(self, inp, target=None):
inp = inp.flatten(1)
assert len(inp) % 2 == 0
samples1, samples2 = inp[::2], inp[1::2]
norm = (samples1 - samples2).norm(p=self.order, dim=1).sum()
if self.average:
norm = norm / len(inp)
return norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Norm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/y6/cy6s4befadeam4uu2vsmdwzq6g2sy67jcflg3ldkyw2jkwjkdmui.py
# Topologically Sorted Source Nodes: [norm, norm_1], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# norm => abs_1, pow_2, sum_1
# norm_1 => div
# Graph fragment:
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%view,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, None), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, 256), kwargs = {})
triton_per_fused_div_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = 0.00390625
tmp6 = tmp4 * tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [norm, norm_1], Original ATen: [aten.linalg_vector_norm, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Norm(nn.Module):
def __init__(self, order=1, size_average=True):
super().__init__()
self.order = order
self.average = size_average
def forward(self, inp, target=None):
if target is not None:
inp = inp - target
inp = inp.flatten()
norm = torch.norm(inp, p=self.order)
if self.average:
norm = norm / len(inp)
return norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
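# Illustrative equivalence sketch (an assumption, not part of the captured
# record): with order=1 and size_average=True the single fused kernel
# reduces to
#
# x = torch.rand(4, 4, 4, 4)
# ref = x.flatten().abs().sum() / x.numel()  # 1 / 256 == 0.00390625
#
# which is exactly the constant baked into the generated kernels.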
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
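    # 0.00390625 == 1 / 256: the size_average division by numel, folded in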
tmp5 = 0.00390625
tmp6 = tmp4 * tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_0[grid(1)](buf1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class NormNew(nn.Module):
def __init__(self, order=1, size_average=True):
super().__init__()
self.order = order
self.average = size_average
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| vzinche/inferno | Norm | false | 4,509 | [
"Apache-2.0"
] | 0 | 91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034 | https://github.com/vzinche/inferno/tree/91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, order=1, size_average=True):
super().__init__()
self.order = order
self.average = size_average
def forward(self, inp, target=None):
if target is not None:
inp = inp - target
inp = inp.flatten()
norm = torch.norm(inp, p=self.order)
if self.average:
norm = norm / len(inp)
return norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ContrastiveLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/5o/c5oq5twhtbx5sx55ig76yx3m7ulbqdqfz67owtfmghxe6aktqr5u.py
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, sub, mul_2, sub_1, clamp, pow_2, mul_3, loss, loss_1], Original ATen: [aten.mul, aten.pow, aten.rsub, aten.clamp, aten.add, aten.mean]
# Source node to ATen node mapping:
# clamp => clamp_min
# loss => add
# loss_1 => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.5), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%permute, 2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %pow_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 0.5), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %permute), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %pow_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add,), kwargs = {})
triton_per_fused_add_clamp_mean_mul_pow_rsub_0 = async_compile.triton('triton_per_fused_add_clamp_mean_mul_pow_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_mean_mul_pow_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 4
r1 = (rindex // 4) % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp3 = tl.load(in_ptr1 + (r2 + (4*r1) + (64*r0)), None, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp0
tmp8 = tmp7 * tmp1
tmp9 = tmp6 - tmp3
tmp10 = 0.0
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tmp11 * tmp11
tmp13 = tmp8 * tmp12
tmp14 = tmp5 + tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, sub, mul_2, sub_1, clamp, pow_2, mul_3, loss, loss_1], Original ATen: [aten.mul, aten.pow, aten.rsub, aten.clamp, aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_mean_mul_pow_rsub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ContrastiveLoss(nn.Module):
def __init__(self, margin=1.0, reduction='mean'):
super().__init__()
self.m = margin
assert reduction in ['mean', 'sum', 'none']
self.reduction = reduction
def forward(self, dist, class_):
dist = dist.transpose(0, -1)
loss = 0.5 * class_ * dist ** 2 + (1 - class_) * 0.5 * torch.clamp(
self.m - dist, min=0) ** 2
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
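# Illustrative worked example (an assumption, not part of the captured
# record): with margin m = 1.0 the fused kernel evaluates, per element,
#
# loss = 0.5 * y * d**2 + 0.5 * (1 - y) * clamp(1.0 - d, min=0)**2
#
# where d comes from dist.transpose(0, -1); e.g. y = 1, d = 0.5 gives 0.125,
# and y = 0, d = 0.5 also gives 0.125, before the final mean over 256
# elements.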
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_mean_mul_pow_rsub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 4
r1 = rindex // 4 % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp3 = tl.load(in_ptr1 + (r2 + 4 * r1 + 64 * r0), None, eviction_policy
='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp0
tmp8 = tmp7 * tmp1
tmp9 = tmp6 - tmp3
tmp10 = 0.0
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tmp11 * tmp11
tmp13 = tmp8 * tmp12
tmp14 = tmp5 + tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
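# The fused kernel above evaluates both contrastive branches per element;
# loading in_ptr1 at (r2 + 4 * r1 + 64 * r0) reads the distance tensor as if
# transposed along (0, -1), so no transpose is materialized. tmp19 is the
# mean over all 256 elements.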
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_mean_mul_pow_rsub_0[grid(1)](buf1,
arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class ContrastiveLossNew(nn.Module):
def __init__(self, margin=1.0, reduction='mean'):
super().__init__()
self.m = margin
assert reduction in ['mean', 'sum', 'none']
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| vzinche/inferno | ContrastiveLoss | false | 4,510 | [
"Apache-2.0"
] | 0 | 91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034 | https://github.com/vzinche/inferno/tree/91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, margin=1.0, reduction='mean'):
super().__init__()
self.m = margin
assert reduction in ['mean', 'sum', 'none']
self.reduction = reduction
def forward(self, dist, class_):
dist = dist.transpose(0, -1)
loss = 0.5 * class_ * dist ** 2 + (1 - class_) * 0.5 * torch.clamp(
self.m - dist, min=0) ** 2
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
WordPredictor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/vj/cvjsxyxn6gyyvtipzzz4vzmiphntjftekqenbhs4uunn7tb5u3jd.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%select, [0]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
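    # Mean over dim 0 of the selected state: sums the four 16-element slices
    # (16 floats apart) and divides by 4.0.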
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5i/c5iaqsrsfvsdc63zdr5cg7f37p7opjrglg57oxsv7o3xmexazhkl.py
# Topologically Sorted Source Nodes: [attn_input], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# attn_input => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%expand, %select], 2), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 4
x3 = (xindex // 8)
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((4*x3) + ((-4) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + (x4), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dj/cdjzmtk5emc6dzqhkvu5c2hunn5lmuxuc47c6cxqz34wv4kuxdx7.py
# Topologically Sorted Source Nodes: [attn_scores, attn_scores_1], Original ATen: [aten.relu, aten._softmax]
# Source node to ATen node mapping:
# attn_scores => relu_1
# attn_scores_1 => amax, exp, sub
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%relu_1, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_relu_2 = async_compile.triton('triton_poi_fused__softmax_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
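    # Numerically stable softmax over dim 0, fused with ReLU: ReLU is applied
    # to the current score and to the four entries along dim 0, the running
    # max is subtracted, and exp is taken; the normalizing division happens
    # in the next kernel (triton_poi_fused__softmax_3).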
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = triton_helpers.maximum(tmp1, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ny/cny2tn74oditrr2kjk2zdrdpqulok6k67hyn3d3scy77clhd6evl.py
# Topologically Sorted Source Nodes: [attn_scores_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_scores_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7i/c7i7xxsu2jfndntuz754dt7qepn6ntejzl6j4mrnolhaicepomou.py
# Topologically Sorted Source Nodes: [pred_input], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# pred_input => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %sum_2], 1), kwargs = {})
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + (x1), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tl.load(in_ptr2 + (16 + (4*x1) + ((-4) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tl.load(in_ptr3 + (4 + x1), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp22 = tl.load(in_ptr2 + (32 + (4*x1) + ((-4) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr3 + (8 + x1), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp26 = tl.load(in_ptr2 + (48 + (4*x1) + ((-4) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr3 + (12 + x1), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp12, tmp29, tmp30)
tmp32 = tl.where(tmp4, tmp11, tmp31)
tl.store(out_ptr0 + (x2), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/v4/cv4ko24f26un3axamp426zbnugqu4jhirnvlxjjqhipcugzzgcbk.py
# Topologically Sorted Source Nodes: [pred_hidden], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# pred_hidden => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
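# Editor's note (hedged sketch): triton_poi_fused_relu_5 is the fused epilogue
# of the hidden layer; applied in place to the mm output, it is equivalent to
#     pred_hidden = torch.relu(mm_out + hidden_bias)
# where `mm_out` and `hidden_bias` are illustrative names for buf9 and primals_7.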
# kernel path: runs/run_shard_7/inductor_cache/oo/coooss73fnhko7ikpecnkcvpqw5bmlwugiidvseo44i3dgysybgg.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_relu_threshold_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
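# Editor's note (hedged sketch): this kernel produces no forward value; it
# materializes the boolean mask autograd saves for the init layer's ReLU
# backward, equivalent to
#     le_mask = torch.relu(mm_out + init_bias) <= 0   # True where the gradient is zeroed
# with `mm_out` and `init_bias` standing in for buf1 and primals_3.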
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (1, 8), (8, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_input], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf1, primals_3, primals_1, buf2, 128, grid=grid(128), stream=stream0)
buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [attn_scores, attn_scores_1], Original ATen: [aten.relu, aten._softmax]
triton_poi_fused__softmax_relu_2.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [attn_scores_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [pred_input], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(buf1, primals_3, primals_1, buf6, buf7, 32, grid=grid(32), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf7, reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [pred_hidden], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf9, primals_7, 16, grid=grid(16), stream=stream0)
del primals_7
buf10 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [pred_logit], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_6.run(buf1, primals_3, buf11, 16, grid=grid(16), stream=stream0)
del buf1
del primals_3
return (buf10, reinterpret_tensor(primals_1, (4, 4, 4), (16, 4, 1), 0), buf0, reinterpret_tensor(buf2, (16, 8), (8, 1), 0), buf4, buf7, buf9, primals_8, primals_6, primals_4, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx.operators
class WordPredictor(nn.Module):
def __init__(self, encoder_output_dim, hidden_dim, output_dim):
super().__init__()
self.encoder_output_dim = encoder_output_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.init_layer = nn.Linear(encoder_output_dim, encoder_output_dim)
self.attn_layer = nn.Linear(2 * encoder_output_dim, 1)
self.hidden_layer = nn.Linear(2 * encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
def forward(self, encoder_output):
encoder_hiddens, *_ = encoder_output
assert encoder_hiddens.dim() == 3
init_state = self._get_init_state(encoder_hiddens)
attn_scores = self._attention(encoder_hiddens, init_state)
attned_state = (encoder_hiddens * attn_scores).sum(0)
pred_input = torch.cat([init_state, attned_state], 1)
pred_hidden = F.relu(self.hidden_layer(pred_input))
pred_logit = self.output_layer(pred_hidden)
return pred_logit
def _get_init_state(self, encoder_hiddens):
x = torch.mean(encoder_hiddens, 0)
x = F.relu(self.init_layer(x))
return x
def _attention(self, encoder_hiddens, init_state):
init_state = init_state.unsqueeze(0).expand_as(encoder_hiddens)
attn_input = torch.cat([init_state, encoder_hiddens], 2)
attn_scores = F.relu(self.attn_layer(attn_input))
attn_scores = F.softmax(attn_scores, 0)
return attn_scores
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output
if log_probs:
return F.log_softmax(logits, dim=1)
else:
return F.softmax(logits, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'encoder_output_dim': 4, 'hidden_dim': 4, 'output_dim': 4}]
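# Editor's note: a hedged usage sketch for the module above; shapes follow
# get_inputs()/get_init_inputs() and the names are illustrative.
#     model = WordPredictor(encoder_output_dim=4, hidden_dim=4, output_dim=4)
#     logits = model(torch.rand(4, 4, 4, 4))   # unpacking keeps the first (4, 4, 4) slice
#     probs = model.get_normalized_probs(logits, log_probs=False)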
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 4
x3 = xindex // 8
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp15 = tl.load(in_ptr2 + (4 * x3 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = triton_helpers.maximum(tmp1, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = triton_helpers.maximum(tmp1, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
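# Editor's note (hedged sketch): the two kernels above split one numerically
# stable softmax over the time dimension, exp(x - max) first and the
# normalizing division second, and together are equivalent to
#     attn = F.softmax(F.relu(scores).view(4, 4, 1), 0)
# where `scores` is an illustrative name for buf4.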
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp15 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr3 + x1, tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 * tmp16
tmp18 = tl.load(in_ptr2 + (16 + 4 * x1 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp19 = tl.load(in_ptr3 + (4 + x1), tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp22 = tl.load(in_ptr2 + (32 + 4 * x1 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr3 + (8 + x1), tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp26 = tl.load(in_ptr2 + (48 + 4 * x1 + (-4 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tl.load(in_ptr3 + (12 + x1), tmp12 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
tmp31 = tl.where(tmp12, tmp29, tmp30)
tmp32 = tl.where(tmp4, tmp11, tmp31)
tl.store(out_ptr0 + x2, tmp32, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (1, 8), (8, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4
), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_1[grid(128)](buf1, primals_3, primals_1, buf2,
128, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 8), (
8, 1), 0), reinterpret_tensor(primals_4, (8, 1), (1, 8), 0),
alpha=1, beta=1, out=buf4)
del primals_5
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_relu_2[grid(16)](buf4, buf5, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_3[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_4[grid(32)](buf1, primals_3, primals_1, buf6,
buf7, 32, XBLOCK=32, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
extern_kernels.mm(buf7, reinterpret_tensor(primals_6, (8, 4), (1, 8
), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_5[grid(16)](buf9, primals_7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_7
buf10 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0)
del buf5
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_6[grid(16)](buf1,
primals_3, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del primals_3
return buf10, reinterpret_tensor(primals_1, (4, 4, 4), (16, 4, 1), 0
), buf0, reinterpret_tensor(buf2, (16, 8), (8, 1), 0
), buf4, buf7, buf9, primals_8, primals_6, primals_4, buf11
class WordPredictorNew(nn.Module):
def __init__(self, encoder_output_dim, hidden_dim, output_dim):
super().__init__()
self.encoder_output_dim = encoder_output_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.init_layer = nn.Linear(encoder_output_dim, encoder_output_dim)
self.attn_layer = nn.Linear(2 * encoder_output_dim, 1)
self.hidden_layer = nn.Linear(2 * encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
def _get_init_state(self, encoder_hiddens):
x = torch.mean(encoder_hiddens, 0)
x = F.relu(self.init_layer(x))
return x
def _attention(self, encoder_hiddens, init_state):
init_state = init_state.unsqueeze(0).expand_as(encoder_hiddens)
attn_input = torch.cat([init_state, encoder_hiddens], 2)
attn_scores = F.relu(self.attn_layer(attn_input))
attn_scores = F.softmax(attn_scores, 0)
return attn_scores
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output
if log_probs:
return F.log_softmax(logits, dim=1)
else:
return F.softmax(logits, dim=1)
def forward(self, input_0):
primals_2 = self.init_layer.weight
primals_3 = self.init_layer.bias
primals_4 = self.attn_layer.weight
primals_5 = self.attn_layer.bias
primals_6 = self.hidden_layer.weight
primals_7 = self.hidden_layer.bias
primals_8 = self.output_layer.weight
primals_9 = self.output_layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| vincentLiangBerkeley/translate | WordPredictor | false | 4,511 | ["BSD-3-Clause"] | 0 | 734ae1ad9dfb778935e4825b5ce2687e2df559ea | https://github.com/vincentLiangBerkeley/translate/tree/734ae1ad9dfb778935e4825b5ce2687e2df559ea | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx.operators
class Model(nn.Module):
def __init__(self, encoder_output_dim, hidden_dim, output_dim):
super().__init__()
self.encoder_output_dim = encoder_output_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.init_layer = nn.Linear(encoder_output_dim, encoder_output_dim)
self.attn_layer = nn.Linear(2 * encoder_output_dim, 1)
self.hidden_layer = nn.Linear(2 * encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
def forward(self, encoder_output):
encoder_hiddens, *_ = encoder_output
assert encoder_hiddens.dim() == 3
init_state = self._get_init_state(encoder_hiddens)
attn_scores = self._attention(encoder_hiddens, init_state)
attned_state = (encoder_hiddens * attn_scores).sum(0)
pred_input = torch.cat([init_state, attned_state], 1)
pred_hidden = F.relu(self.hidden_layer(pred_input))
pred_logit = self.output_layer(pred_hidden)
return pred_logit
def _get_init_state(self, encoder_hiddens):
x = torch.mean(encoder_hiddens, 0)
x = F.relu(self.init_layer(x))
return x
def _attention(self, encoder_hiddens, init_state):
init_state = init_state.unsqueeze(0).expand_as(encoder_hiddens)
attn_input = torch.cat([init_state, encoder_hiddens], 2)
attn_scores = F.relu(self.attn_layer(attn_input))
attn_scores = F.softmax(attn_scores, 0)
return attn_scores
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output
if log_probs:
return F.log_softmax(logits, dim=1)
else:
return F.softmax(logits, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
PairwiseCrossCorrelation | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6n/c6nzxribncukxumzo6sd4qteovnhn4lgur4ekbupb7htju2zqwug.py
# Topologically Sorted Source Nodes: [on_diag, pow_1, on_diag_1], Original ATen: [aten.add, aten.pow, aten.sum]
# Source node to ATen node mapping:
# on_diag => add
# on_diag_1 => sum_1
# pow_1 => pow_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%diagonal, -1), kwargs = {})
# %copy__default : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%diagonal_default, %add), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%diagonal_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
triton_per_fused_add_pow_sum_0 = async_compile.triton('triton_per_fused_add_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_pow_sum_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_pow_sum_0(in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (65*r0), None, eviction_policy='evict_last')
tmp1 = -1.0
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr1 + (tl.broadcast_to(65*r0, [XBLOCK, RBLOCK])), tmp2, None)
tl.store(out_ptr2 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
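# Editor's note (hedged sketch): for a contiguous 64x64 matrix the main
# diagonal sits at flat offsets 0, 65, 130, ... (stride n + 1), hence the
# `65 * r0` loads. The fused work is equivalent to
#     d = torch.diagonal(c).add_(-1)   # in-place, mirroring the store to out_ptr1
#     on_diag = d.pow(2).sum()         # the scalar written to out_ptr2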
# kernel path: runs/run_shard_7/inductor_cache/u2/cu2q6xanzhzdl7vgbbpwx7gx4ge6d2wysmm5vs4wjx6a2ddbtkae.py
# Topologically Sorted Source Nodes: [pow_2, off_diag_1, mul, loss, loss_1], Original ATen: [aten.pow, aten.sum, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# loss => add_1
# loss_1 => div
# mul => mul
# off_diag_1 => sum_2
# pow_2 => pow_2
# Graph fragment:
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_5, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %mul), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, 4096), kwargs = {})
triton_red_fused_add_div_mul_pow_sum_1 = async_compile.triton('triton_red_fused_add_div_mul_pow_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_mul_pow_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_mul_pow_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + (1 + (65*(r0 // 64)) + (r0 % 64)), rmask, eviction_policy='evict_first', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = tl.load(in_out_ptr0 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1])
tmp7 = 1.0
tmp8 = tmp3 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = 0.000244140625
tmp11 = tmp9 * tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
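# Editor's note (hedged sketch): the index 1 + 65 * (r0 // 64) + r0 % 64
# enumerates exactly the 4032 entries of off_diagonal()'s flatten/view trick,
# c.flatten()[:-1].view(63, 65)[:, 1:]. The tail of the kernel then finishes
#     off_diag = off.pow(2).sum()
#     loss = (on_diag + 1 * off_diag) / 64 ** 2   # 1 / 4096 == 0.000244140625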
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [c], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(arg0_1, (64, 2), (1, 128), 0), reinterpret_tensor(arg0_1, (2, 64), (128, 1), 64), out=buf0)
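        # Editor's note (hedged): these two strided views realize inp[::2].T and
        # inp[1::2] without a copy; row stride 128 skips every other flattened
        # sample, and offset 64 starts at the second sample.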
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [on_diag, pow_1, on_diag_1], Original ATen: [aten.add, aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_pow_sum_0.run(buf0, buf0, buf3, 1, 64, grid=grid(1), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [pow_2, off_diag_1, mul, loss, loss_1], Original ATen: [aten.pow, aten.sum, aten.mul, aten.add, aten.div]
triton_red_fused_add_div_mul_pow_sum_1.run(buf5, buf0, 1, 4032, grid=grid(1), stream=stream0)
del buf0
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class PairwiseCrossCorrelation(nn.Module):
def __init__(self, lambd=1):
super().__init__()
self.lambd = lambd
def off_diagonal(self, x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
def forward(self, inp, target=None):
inp = inp.flatten(1)
assert len(inp) % 2 == 0
samples1, samples2 = inp[::2], inp[1::2]
c = samples1.T @ samples2
on_diag = torch.diagonal(c).add_(-1)
on_diag = torch.pow(on_diag, 2).sum()
off_diag = self.off_diagonal(c)
off_diag = torch.pow(off_diag, 2).sum()
loss = on_diag + self.lambd * off_diag
loss = loss / len(c) ** 2
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
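# Editor's note: a hedged usage sketch; the loss is zero exactly when the
# 64x64 cross-correlation matrix equals the identity.
#     crit = PairwiseCrossCorrelation(lambd=1)
#     loss = crit(torch.rand(4, 4, 4, 4))   # even batch; rows 0,2 pair with rows 1,3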
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_pow_sum_0(in_ptr0, out_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 65 * r0, None, eviction_policy='evict_last')
tmp1 = -1.0
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr1 + tl.broadcast_to(65 * r0, [XBLOCK, RBLOCK]), tmp2, None)
tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_red_fused_add_div_mul_pow_sum_1(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 4032
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + (1 + 65 * (r0 // 64) + r0 % 64), rmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = tl.load(in_out_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, 1])
tmp7 = 1.0
tmp8 = tmp3 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = 0.000244140625
tmp11 = tmp9 * tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg0_1, (64, 2), (1, 128), 0),
reinterpret_tensor(arg0_1, (2, 64), (128, 1), 64), out=buf0)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_pow_sum_0[grid(1)](buf0, buf0, buf3, 1, 64,
XBLOCK=1, num_warps=2, num_stages=1)
buf5 = buf3
del buf3
triton_red_fused_add_div_mul_pow_sum_1[grid(1)](buf5, buf0, 1, 4032,
XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del buf0
return buf5,
class PairwiseCrossCorrelationNew(nn.Module):
def __init__(self, lambd=1):
super().__init__()
self.lambd = lambd
def off_diagonal(self, x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| vzinche/inferno | PairwiseCrossCorrelation | false | 4,512 | ["Apache-2.0"] | 0 | 91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034 | https://github.com/vzinche/inferno/tree/91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, lambd=1):
super().__init__()
self.lambd = lambd
def off_diagonal(self, x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
def forward(self, inp, target=None):
inp = inp.flatten(1)
assert len(inp) % 2 == 0
samples1, samples2 = inp[::2], inp[1::2]
c = samples1.T @ samples2
on_diag = torch.diagonal(c).add_(-1)
on_diag = torch.pow(on_diag, 2).sum()
off_diag = self.off_diagonal(c)
off_diag = torch.pow(off_diag, 2).sum()
loss = on_diag + self.lambd * off_diag
loss = loss / len(c) ** 2
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Linear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xg/cxg3op264zx2ec3atkmjwmld6htg2wdbd2inqitp272jkpjbeazc.py
# Topologically Sorted Source Nodes: [weight_mean, weight], Original ATen: [aten.mean, aten.sub]
# Source node to ATen node mapping:
# weight => sub
# weight_mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
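# Editor's note (hedged sketch): this kernel centralizes each weight row and is
# equivalent to
#     w_centered = w - w.mean(dim=1, keepdim=True)   # per-output-feature mean removed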
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight_mean, weight], Original ATen: [aten.mean, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_sub_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del buf0
del primals_2
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from warnings import warn
from torch.nn import functional as F
from torch.nn import Linear as normal_linear
import torch.utils.data
from torchvision import transforms as transforms
class Linear(normal_linear):
def __init__(self, *args, **kwargs):
super(Linear, self).__init__(*args, **kwargs)
if self.bias is not None:
warn(
                'A Linear layer has bias, which may not be suitable with weight centralization.'
)
def forward(self, input: 'Tensor') ->Tensor:
weight = self.weight
weight_mean = weight.mean(dim=1, keepdim=True)
weight = weight - weight_mean
return F.linear(input, weight, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
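# Editor's note: a hedged check of the centralization property (illustrative
# names; relies only on the forward defined above): every effective weight row
# sums to zero.
#     layer = Linear(4, 4)
#     w = layer.weight - layer.weight.mean(dim=1, keepdim=True)
#     assert torch.allclose(w.sum(dim=1), torch.zeros(4), atol=1e-6)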
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from warnings import warn
from torch.nn import Linear as normal_linear
import torch.utils.data
from torchvision import transforms as transforms
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_sub_0[grid(16)](primals_1, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del buf0
del primals_2
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class LinearNew(normal_linear):
def __init__(self, *args, **kwargs):
super(LinearNew, self).__init__(*args, **kwargs)
if self.bias is not None:
warn(
                'A Linear layer has bias, which may not be suitable with weight centralization.'
)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| wang93/pytorch-cifar10 | Linear | false | 4,513 | ["Apache-2.0"] | 0 | 07a54dd575aad9b011114352d08fdd9f61e360a1 | https://github.com/wang93/pytorch-cifar10/tree/07a54dd575aad9b011114352d08fdd9f61e360a1 | import torch
from torch import Tensor
from warnings import warn
from torch.nn import functional as F
from torch.nn import Linear as normal_linear
import torch.utils.data
from torchvision import transforms as transforms
class Model(normal_linear):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.bias is not None:
warn(
                'A Linear layer has bias, which may not be suitable with weight centralization.'
)
def forward(self, input: 'Tensor') ->Tensor:
weight = self.weight
weight_mean = weight.mean(dim=1, keepdim=True)
weight = weight - weight_mean
return F.linear(input, weight, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Mnist_CNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wr/cwrjr75ksmgrel4djhr27volr7bzoob6ixyduxm74y2ky6udtlzn.py
# Topologically Sorted Source Nodes: [conv2d, xb_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# xb_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 196) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
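# Editor's note (hedged sketch): xnumel = 12544 = 4 * 16 * 14 * 14, and
# x1 = (xindex // 196) % 16 recovers the channel index so the per-channel bias
# broadcasts correctly; the fused epilogue is equivalent to
#     xb = torch.relu(conv_out + bias.view(1, -1, 1, 1))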
# kernel path: runs/run_shard_7/inductor_cache/iz/cizeqrrbvjbeoqr7ojtcvnagh6alsvquwz2qlmnzzbytbq77tnn4.py
# Topologically Sorted Source Nodes: [conv2d_1, xb_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# xb_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 49) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/23/c237kritlyjcuojdcyxxqqygn46332pp2fgszsmeqpe2chj5uwq2.py
# Topologically Sorted Source Nodes: [conv2d_2, xb_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# xb_3 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rr/crrealymt6xnsywuzmxru2w3tcsoludjyewecl5ik2qqtmaogdt2.py
# Topologically Sorted Source Nodes: [xb_4], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# xb_4 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%relu_2, [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_3 = async_compile.triton('triton_poi_fused_avg_pool2d_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')
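# Editorial note: the [4, 4] pooling window covers the entire 4 x 4 feature
# map, so the pool degenerates to a full spatial mean. Each of the
# xnumel = 40 (= 4 batch * 10 channel) programs sums its 16 contiguous HW
# elements and scales by 0.0625 = 1/16; in eager PyTorch this is
#
#   out = F.avg_pool2d(x, 4)  # (4, 10, 4, 4) -> (4, 10, 1, 1)
#   # equivalently: x.mean(dim=(2, 3), keepdim=True)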
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (10, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 14, 14), (3136, 196, 14, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, xb_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 12544, grid=grid(12544), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 7, 7), (784, 49, 7, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, xb_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 3136, grid=grid(3136), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 10, 4, 4), (160, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, xb_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_7, 640, grid=grid(640), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 10, 1, 1), (10, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [xb_4], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_3.run(buf5, buf6, 40, grid=grid(40), stream=stream0)
return (reinterpret_tensor(buf6, (4, 10), (10, 1), 0), primals_2, primals_4, primals_6, primals_1, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 28, 28), (784, 784, 28, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((10, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.quantization
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Mnist_CNN(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)
def forward(self, xb):
xb = xb.view(-1, 1, 28, 28)
xb = F.relu(self.conv1(xb))
xb = F.relu(self.conv2(xb))
xb = F.relu(self.conv3(xb))
xb = F.avg_pool2d(xb, 4)
return xb.view(-1, xb.size(1))
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.quantization
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 196 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 40
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x0, tmp32, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (16, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (16, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (10, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 14, 14), (3136, 196, 14, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(12544)](buf1, primals_3,
12544, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 7, 7), (784, 49, 7, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(3136)](buf3, primals_5,
3136, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 10, 4, 4), (160, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(640)](buf5, primals_7, 640,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 10, 1, 1), (10, 1, 1, 1), torch.float32)
triton_poi_fused_avg_pool2d_3[grid(40)](buf5, buf6, 40, XBLOCK=64,
num_warps=1, num_stages=1)
return reinterpret_tensor(buf6, (4, 10), (10, 1), 0
), primals_2, primals_4, primals_6, primals_1, buf1, buf3, buf5
class Mnist_CNNNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| voyageth/PyTorch-tutorials-kr | Mnist_CNN | false | 4,514 | [
"BSD-3-Clause"
] | 0 | 05d2dd5931abfca6ce1e0b297f4ceb7f4eae6239 | https://github.com/voyageth/PyTorch-tutorials-kr/tree/05d2dd5931abfca6ce1e0b297f4ceb7f4eae6239 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.quantization
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(16, 10, kernel_size=3, stride=2, padding=1)
def forward(self, xb):
xb = xb.view(-1, 1, 28, 28)
xb = F.relu(self.conv1(xb))
xb = F.relu(self.conv2(xb))
xb = F.relu(self.conv3(xb))
xb = F.avg_pool2d(xb, 4)
return xb.view(-1, xb.size(1))
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return []
|
FrequencyLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/el/celh6tzoeqjvwdlh4lut5h2mpwt2pqqku7qv4roievqz5vf32mhc.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# loss => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 192.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
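# Editorial note on rnumel = 192: torch.fft.rfft2 over the last two dims of a
# (4, 4, 4, 4) real tensor yields a (4, 4, 4, 3) complex tensor (the one-sided
# transform keeps 4 // 2 + 1 = 3 bins on the last axis), so |x_fft - y_fft|
# holds 4 * 4 * 4 * 3 = 192 elements and the single-pass sum is divided by 192.0.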
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x_fft], Original ATen: [aten._fft_r2c]
buf0 = torch.ops.aten._fft_r2c.default(arg0_1, [2, 3], 0, True)
del arg0_1
buf1 = buf0
del buf0
# Topologically Sorted Source Nodes: [y_fft], Original ATen: [aten._fft_r2c]
buf2 = torch.ops.aten._fft_r2c.default(arg1_1, [2, 3], 0, True)
del arg1_1
buf3 = buf2
del buf2
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.sub]
buf4 = torch.ops.aten.sub.Tensor(buf1, buf3)
del buf1
del buf3
buf5 = buf4
del buf4
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.abs]
buf6 = torch.ops.aten.abs.default(buf5)
del buf5
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf9, buf7, 1, 192, grid=grid(1), stream=stream0)
del buf7
return (buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FrequencyLoss(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=0.001):
super(FrequencyLoss, self).__init__()
self.criterion = torch.nn.L1Loss()
def forward(self, x, y):
x_fft = torch.fft.rfft2(x, dim=(2, 3))
y_fft = torch.fft.rfft2(y, dim=(2, 3))
loss = self.criterion(x_fft, y_fft)
return loss
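# Usage sketch (editorial; shapes are illustrative): any pair of same-shaped
# real tensors works, e.g.
#
#   crit = FrequencyLoss()
#   loss = crit(torch.rand(4, 3, 64, 64), torch.rand(4, 3, 64, 64))
#
# L1Loss on the complex FFT outputs lowers to the sub/abs/mean graph shown in
# the compiled code above.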
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, rmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 192.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._fft_r2c.default(arg0_1, [2, 3], 0, True)
del arg0_1
buf1 = buf0
del buf0
buf2 = torch.ops.aten._fft_r2c.default(arg1_1, [2, 3], 0, True)
del arg1_1
buf3 = buf2
del buf2
buf4 = torch.ops.aten.sub.Tensor(buf1, buf3)
del buf1
del buf3
buf5 = buf4
del buf4
buf6 = torch.ops.aten.abs.default(buf5)
del buf5
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((), (), torch.float32)
buf9 = buf8
del buf8
get_raw_stream(0)
triton_per_fused_mean_0[grid(1)](buf9, buf7, 1, 192, XBLOCK=1,
num_warps=2, num_stages=1)
del buf7
return buf9,
class FrequencyLossNew(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=0.001):
super(FrequencyLossNew, self).__init__()
self.criterion = torch.nn.L1Loss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| vztu/DebandingNet | FrequencyLoss | false | 4,515 | [
"MIT"
] | 0 | 4af8e83ffbfc70dc220dd6fea2827fb75796f10c | https://github.com/vztu/DebandingNet/tree/4af8e83ffbfc70dc220dd6fea2827fb75796f10c | import torch
import torch.nn as nn
class Model(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=0.001):
super().__init__()
self.criterion = torch.nn.L1Loss()
def forward(self, x, y):
x_fft = torch.fft.rfft2(x, dim=(2, 3))
y_fft = torch.fft.rfft2(y, dim=(2, 3))
loss = self.criterion(x_fft, y_fft)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
FocalLossSigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/qm/cqmm47zum25ouiix4f7wvfuakk6y4jnirb6ca36sewd4mw62ehm7.py
# Topologically Sorted Source Nodes: [P, sub_1, pow_2, mul_5, sub_2, log_1, mul_6, sub_3, mul_7, alpha_mask, sub_4, loss_neg, sub, pow_1, mul_1, log, mul_2, mul_3, loss_pos, batch_loss, loss], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.log, aten.add, aten.sum]
# Source node to ATen node mapping:
# P => sigmoid
# alpha_mask => mul
# batch_loss => add
# log => log
# log_1 => log_1
# loss => sum_1
# loss_neg => mul_8
# loss_pos => mul_4
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# Graph fragment:
# %sigmoid : [num_users=4] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, -1.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub_2,), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %log_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %sub_3), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.25), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %sub_4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -1.0), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %arg1_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %mul), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_8, %mul_4), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {})
triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0 = async_compile.triton('triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp9 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp3 * tmp3
tmp5 = -1.0
tmp6 = tmp4 * tmp5
tmp7 = tl_math.log(tmp3)
tmp8 = tmp6 * tmp7
tmp10 = tmp2 - tmp9
tmp11 = tmp8 * tmp10
tmp12 = 0.25
tmp13 = tmp9 * tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp11 * tmp14
tmp16 = tl_math.log(tmp1)
tmp17 = tmp6 * tmp16
tmp18 = tmp17 * tmp9
tmp19 = tmp18 * tmp13
tmp20 = tmp15 + tmp19
tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
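# Editorial note: with p = sigmoid(x), alpha = 0.25 (tmp12) and gamma = 2
# (realized as tmp4 = tmp3 * tmp3), each element of the reduction computes
#
#   -(1 - p)**2 * log(1 - p) * (1 - y) * (1 - 0.25 * y)   # tmp15, loss_neg
# + -(1 - p)**2 * log(p)     * y       * (0.25 * y)       # tmp19, loss_pos
#
# and the block-wide tl.sum matches size_average=False (a plain sum, not a mean).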
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [P, sub_1, pow_2, mul_5, sub_2, log_1, mul_6, sub_3, mul_7, alpha_mask, sub_4, loss_neg, sub, pow_1, mul_1, log, mul_2, mul_3, loss_pos, batch_loss, loss], Original ATen: [aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.log, aten.add, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
class FocalLossSigmoid(nn.Module):
"""
    Sigmoid-based (binary) focal loss.
"""
def __init__(self, alpha=0.25, gamma=2, size_average=False):
super(FocalLossSigmoid, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
def forward(self, inputs, targets):
P = torch.sigmoid(inputs)
alpha_mask = self.alpha * targets
loss_pos = -1.0 * torch.pow(1 - P, self.gamma) * torch.log(P
) * targets * alpha_mask
loss_neg = -1.0 * torch.pow(1 - P, self.gamma) * torch.log(1 - P) * (
1 - targets) * (1 - alpha_mask)
batch_loss = loss_neg + loss_pos
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
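# Usage sketch (editorial): `inputs` are raw logits and `targets` a {0, 1}
# tensor of the same shape, e.g.
#
#   crit = FocalLossSigmoid(alpha=0.25, gamma=2)
#   loss = crit(torch.randn(8, 20), (torch.rand(8, 20) > 0.5).float())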
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp4 = tmp3 * tmp3
tmp5 = -1.0
tmp6 = tmp4 * tmp5
tmp7 = tl_math.log(tmp3)
tmp8 = tmp6 * tmp7
tmp10 = tmp2 - tmp9
tmp11 = tmp8 * tmp10
tmp12 = 0.25
tmp13 = tmp9 * tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp11 * tmp14
tmp16 = tl_math.log(tmp1)
tmp17 = tmp6 * tmp16
tmp18 = tmp17 * tmp9
tmp19 = tmp18 * tmp13
tmp20 = tmp15 + tmp19
tmp21 = tl.broadcast_to(tmp20, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_log_mul_pow_rsub_sigmoid_sum_0[grid(1)](arg0_1,
arg1_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class FocalLossSigmoidNew(nn.Module):
"""
    Sigmoid-based (binary) focal loss.
"""
def __init__(self, alpha=0.25, gamma=2, size_average=False):
super(FocalLossSigmoidNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| wangbingok1118/SSD_Pytorch | FocalLossSigmoid | false | 4,516 | [
"MIT"
] | 0 | 8d3f924671cec367c3c420eba2f002cc5b5181bb | https://github.com/wangbingok1118/SSD_Pytorch/tree/8d3f924671cec367c3c420eba2f002cc5b5181bb | import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
class Model(nn.Module):
"""
    Sigmoid-based (binary) focal loss.
"""
def __init__(self, alpha=0.25, gamma=2, size_average=False):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
def forward(self, inputs, targets):
P = torch.sigmoid(inputs)
alpha_mask = self.alpha * targets
loss_pos = -1.0 * torch.pow(1 - P, self.gamma) * torch.log(P
) * targets * alpha_mask
loss_neg = -1.0 * torch.pow(1 - P, self.gamma) * torch.log(1 - P) * (
1 - targets) * (1 - alpha_mask)
batch_loss = loss_neg + loss_pos
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MedianPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lx/clxzn2tcfqdeak7h7odiokny3naddwbzp2nxtk2os256jlv5cidb.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%unfold_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3) % 3
x2 = (xindex // 9) % 4
x3 = (xindex // 36) % 4
x4 = (xindex // 144)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0 + x2))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1 + x3))))) + (16*x4)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x5), tmp0, xmask)
''', device_str='cuda')
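# Editorial note: the nested-abs load index above is Inductor's closed form
# for reflect padding fused with the 3x3 unfold. With padded row
# r = x1 + x3 - 1 and column c = x0 + x2 - 1 (each in [-1, 4] for the 4x4
# map), the source element is
#
#   in_ptr0[16 * x4 + 4 * (3 - |3 - |r||) + (3 - |3 - |c||)]
#
# where 3 - |3 - |u|| reflects an out-of-range coordinate u back into [0, 3],
# e.g. -1 -> 1 and 4 -> 2, matching F.pad(x, (1, 1, 1, 1), mode='reflect').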
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 3, 3), (576, 144, 36, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 2304, grid=grid(2304), stream=stream0)
del arg0_1
# Topologically Sorted Source Nodes: [median], Original ATen: [aten.median]
buf1 = torch.ops.aten.median.dim(reinterpret_tensor(buf0, (4, 4, 4, 4, 9), (576, 144, 36, 9, 1), 0), -1)
del buf0
buf2 = buf1[0]
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _quadruple
class MedianPool2d(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=True):
super(MedianPool2d, self).__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding)
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - ih % self.stride[0], 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - iw % self.stride[1], 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = pl, pr, pt, pb
else:
padding = self.padding
return padding
def forward(self, x):
x = F.pad(x, self._padding(x), mode='reflect')
x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1],
self.stride[1])
x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
return x
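# Usage sketch (editorial): with same=True the module pads so the output keeps
# the input's spatial size, i.e. a classic median filter:
#
#   mp = MedianPool2d(kernel_size=3, stride=1, same=True)
#   y = mp(torch.rand(1, 3, 32, 32))  # -> (1, 3, 32, 32)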
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _quadruple
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 3
x2 = xindex // 9 % 4
x3 = xindex // 36 % 4
x4 = xindex // 144
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0 + x2)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1 + x3)) + 16 *
x4), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x5, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 3, 3), (576, 144, 36, 9, 3,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(2304)](arg0_1, buf0, 2304, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = torch.ops.aten.median.dim(reinterpret_tensor(buf0, (4, 4, 4,
4, 9), (576, 144, 36, 9, 1), 0), -1)
del buf0
buf2 = buf1[0]
del buf1
return buf2,
class MedianPool2dNew(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=True):
super(MedianPool2dNew, self).__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding)
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - ih % self.stride[0], 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - iw % self.stride[1], 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = pl, pr, pt, pb
else:
padding = self.padding
return padding
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| vztu/DebandingNet | MedianPool2d | false | 4,517 | [
"MIT"
] | 0 | 4af8e83ffbfc70dc220dd6fea2827fb75796f10c | https://github.com/vztu/DebandingNet/tree/4af8e83ffbfc70dc220dd6fea2827fb75796f10c | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from torch.nn.modules.utils import _quadruple
class Model(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=True):
super().__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding)
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - ih % self.stride[0], 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - iw % self.stride[1], 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = pl, pr, pt, pb
else:
padding = self.padding
return padding
def forward(self, x):
x = F.pad(x, self._padding(x), mode='reflect')
x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1],
self.stride[1])
x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ParseL1loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/w7/cw7qeuurxwsvjsp2nzkolkm2nsn7al6jrrawccr3pzcp6esgfeor.py
# Topologically Sorted Source Nodes: [eq, mask, mul, mul_1, loss, sum_1, add, loss_1], Original ATen: [aten.eq, aten._to_copy, aten.mul, aten.sub, aten.abs, aten.sum, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# eq => eq
# loss => abs_1, sub, sum_1
# loss_1 => div
# mask => convert_element_type
# mul => mul
# mul_1 => mul_1
# sum_1 => sum_2
# Graph fragment:
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg0_1, 1), kwargs = {})
# %convert_element_type : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %convert_element_type), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %convert_element_type), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 0.0001), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {})
triton_per_fused__to_copy_abs_add_div_eq_mul_sub_sum_0 = async_compile.triton('triton_per_fused__to_copy_abs_add_div_eq_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_abs_add_div_eq_mul_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_abs_add_div_eq_mul_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp6 = tl.load(in_ptr2 + (r0), None)
tmp2 = 1.0
tmp3 = tmp1 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = tmp0 * tmp4
tmp7 = tmp6 * tmp4
tmp8 = tmp5 - tmp7
tmp9 = tl_math.abs(tmp8)
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.broadcast_to(tmp4, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 0.0001
tmp17 = tmp15 + tmp16
tmp18 = tmp12 / tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None)
''', device_str='cuda')
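# Editorial note: the fused reduction above evaluates, in a single pass,
#
#   loss = sum(|output * m - target * m|) / (sum(m) + 1e-4),  m = (mask == 1)
#
# i.e. an L1 loss restricted to mask == 1 pixels, normalized by the mask area;
# the 1e-4 term guards against division by zero when the mask is empty.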
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [eq, mask, mul, mul_1, loss, sum_1, add, loss_1], Original ATen: [aten.eq, aten._to_copy, aten.mul, aten.sub, aten.abs, aten.sum, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_abs_add_div_eq_mul_sub_sum_0.run(buf2, arg1_1, arg0_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class ParseL1loss(nn.Module):
def __init__(self):
super(ParseL1loss, self).__init__()
def forward(self, output, target, mask):
mask = (mask == 1).float()
        loss = F.l1_loss(output * mask, target * mask, reduction='sum')
loss = loss / (mask.sum() + 0.0001)
return loss
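# Usage sketch (editorial; `pred` and `gt` are hypothetical same-shaped
# tensors): only pixels where mask == 1 contribute to the loss, e.g.
#
#   crit = ParseL1loss()
#   loss = crit(pred, gt, (gt > 0).float())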
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_abs_add_div_eq_mul_sub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp2 = 1.0
tmp3 = tmp1 == tmp2
tmp4 = tmp3.to(tl.float32)
tmp5 = tmp0 * tmp4
tmp7 = tmp6 * tmp4
tmp8 = tmp5 - tmp7
tmp9 = tl_math.abs(tmp8)
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.broadcast_to(tmp4, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 0.0001
tmp17 = tmp15 + tmp16
tmp18 = tmp12 / tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
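# The whole loss is fused into this single persistent reduction: the mask is rebuilt as
# (in_ptr1 == 1).float(), |in_ptr0*mask - in_ptr2*mask| and the mask itself are summed in
# the same pass, and the stored scalar is sum_abs / (mask_sum + 1e-4).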
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_abs_add_div_eq_mul_sub_sum_0[grid(1)](buf2,
arg1_1, arg0_1, arg2_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf2,
class ParseL1lossNew(nn.Module):
def __init__(self):
super(ParseL1lossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
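# Minimal usage sketch (hypothetical smoke test, not from the source repo; assumes a CUDA
# device and the exact (4, 4, 4, 4) float32 shapes asserted in call()):
#   loss_fn = ParseL1lossNew()
#   a, b, m = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
#   scalar_loss = loss_fn(a, b, m)  # 0-dim CUDA tensor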
| weberhen/NonCuboidRoom | ParseL1loss | false | 4,518 | [
"MIT"
] | 0 | 871a77941697f1457cdae541b8ffcdce4f9134e3 | https://github.com/weberhen/NonCuboidRoom/tree/871a77941697f1457cdae541b8ffcdce4f9134e3 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, target, mask):
mask = (mask == 1).float()
        loss = F.l1_loss(output * mask, target * mask, reduction='sum')  # 'sum' replaces the long-deprecated size_average=False
loss = loss / (mask.sum() + 0.0001)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
WassersteinLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/yb/cyb57rylm54674ljvszfqxui4mqu3vz6w3nxnmj5kbxrdbc23434.py
# Topologically Sorted Source Nodes: [mean, mean_1, sub], Original ATen: [aten.mean, aten.sub]
# Source node to ATen node mapping:
# mean => mean
# mean_1 => mean_1
# sub => sub
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%arg0_1,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%arg1_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean, %mean_1), kwargs = {})
triton_per_fused_mean_sub_0 = async_compile.triton('triton_per_fused_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp4 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp3 / tmp8
tmp10 = tmp7 / tmp8
tmp11 = tmp9 - tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mean, mean_1, sub], Original ATen: [aten.mean, aten.sub]
stream0 = get_raw_stream(0)
triton_per_fused_mean_sub_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class WassersteinLoss(nn.Module):
"""For WGAN."""
def forward(self, real, fake):
return real.mean() - fake.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(tl.sum(tmp1, 0))
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = 256.0
tmp9 = tmp3 / tmp8
tmp10 = tmp7 / tmp8
tmp11 = tmp9 - tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
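# One pass computes both means: tmp3 and tmp7 are the two 256-element sums, 256.0 is the
# traced element count, and the stored scalar is mean(in_ptr0) - mean(in_ptr1), i.e.
# real.mean() - fake.mean().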
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_sub_0[grid(1)](buf2, arg0_1, arg1_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class WassersteinLossNew(nn.Module):
"""For WGAN."""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
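# Sanity-check sketch (hypothetical, assumes a CUDA device; not part of the source repo):
#   real = torch.rand(4, 4, 4, 4, device='cuda')
#   fake = torch.rand(4, 4, 4, 4, device='cuda')
#   assert torch.allclose(WassersteinLossNew()(real, fake), real.mean() - fake.mean())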
| wegroupwolves/fastai | WassersteinLoss | false | 4,519 | [
"Apache-2.0"
] | 0 | df40df403e05e132411f0f7abc7ec33c86e58bb9 | https://github.com/wegroupwolves/fastai/tree/df40df403e05e132411f0f7abc7ec33c86e58bb9 | import torch
from torch import nn
class Model(nn.Module):
"""For WGAN."""
def forward(self, real, fake):
return real.mean() - fake.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SpatialAttn | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/bi/cbibg4lc7dc2sbbxe7z4v4ebtulhxgixrw57yvrp2yyfgel2ghb5.py
# Topologically Sorted Source Nodes: [sum_1, itruediv], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# itruediv => div
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select, %sum_1), kwargs = {})
triton_per_fused_div_sum_0 = async_compile.triton('triton_per_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_sum_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr0 + (16 + r0), None)
tmp3 = tl.load(in_ptr0 + (32 + r0), None)
tmp5 = tl.load(in_ptr0 + (48 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp8 / tmp11
tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/o4/co4t5cpqvwzgwid4gtpymik6alub22ihe334fobcw5otrmsoxavz.py
# Topologically Sorted Source Nodes: [itruediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# itruediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select, %sum_1), kwargs = {})
# %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%view_1, %div, 0, 0), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = 4.0
tmp12 = tmp10 / tmp11
tmp13 = tl.where(tmp2, tmp3, tmp12)
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xf/cxfi22rac3t4r7mazbza6uabzf5oenc5fn7fw2a7l7czenb5tshs.py
# Topologically Sorted Source Nodes: [sum_2, sum_3, itruediv_2], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# itruediv_2 => div_2
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_8,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_16,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select_17, %sum_3), kwargs = {})
triton_per_fused_div_sum_2 = async_compile.triton('triton_per_fused_div_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_sum_2(in_ptr0, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp3 = tl.load(in_ptr0 + (r0 + (4*((r0 % 4) // 4))), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + (4*((r0 % 4) // 4))), None)
tmp12 = tl.load(in_ptr0 + (r0 + (8*((r0 % 4) // 4))), None)
tmp13 = tl.load(in_ptr0 + (16 + r0 + (8*((r0 % 4) // 4))), None)
tmp18 = tl.load(in_ptr0 + (32 + r0 + (8*((r0 % 4) // 4))), None)
tmp0 = tl.full([1, 1], 1, tl.int32)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tl.full([1, 1], 2, tl.int32)
tmp10 = tmp9 == tmp0
tmp11 = tmp0 == tmp0
tmp14 = tl.where(tmp2, tmp12, tmp13)
tmp15 = tmp14 / tmp8
tmp16 = tl.where(tmp11, tmp15, tmp14)
tmp17 = tmp9 == tmp1
tmp19 = tl.where(tmp17, tmp12, tmp18)
tmp20 = tl.where(tmp10, tmp15, tmp19)
tmp21 = tl.where(tmp10, tmp16, tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tmp21 / tmp24
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp25, None)
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tp/ctp65xzcaq3yt6j5igmjpdwgic52mltna4dnuwk4o2jezwaqqwcq.py
# Topologically Sorted Source Nodes: [itruediv_2], Original ATen: [aten.div]
# Source node to ATen node mapping:
# itruediv_2 => div_2
# Graph fragment:
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%select_17, %sum_3), kwargs = {})
# %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%view_19, %div_2, 0, 2), kwargs = {})
triton_poi_fused_div_3 = async_compile.triton('triton_poi_fused_div_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x0 + (8*((x0 % 4) // 4))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (16 + x0 + (8*((x0 % 4) // 4))), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (0))
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr1 + (x0 + (8*((x0 % 4) // 4)) + (16*x1)), xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 1, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = tmp4 == tmp4
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp4 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp14 = tmp11 / tmp13
tmp15 = tl.where(tmp6, tmp14, tmp11)
tmp16 = tmp0 == tmp7
tmp18 = tl.where(tmp16, tmp9, tmp17)
tmp19 = tl.where(tmp5, tmp14, tmp18)
tmp20 = tl.where(tmp5, tmp15, tmp19)
tmp21 = tl.where(tmp2, tmp3, tmp20)
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wl/cwlv3v7i5tdm7rgttqnrgro6gjlltqdiyasmyr7rg7nydh3gicwq.py
# Topologically Sorted Source Nodes: [sum_4], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_4 => sum_4
# Graph fragment:
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%select_24,), kwargs = {})
triton_per_fused_sum_4 = async_compile.triton('triton_per_fused_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_4(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp3 = tl.load(in_ptr0 + (32 + r0 + (4*((r0 % 4) // 4))), None)
tmp4 = tl.load(in_ptr0 + (48 + r0 + (4*((r0 % 4) // 4))), None)
tmp0 = tl.full([1, 1], 3, tl.int32)
tmp1 = tl.full([1, 1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jj/cjjqjoipyiqihfkftp47jgsxj24bv47ixmjupvxwkuttwgjj6aii.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_7 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%view_32, %select_26, 0, 3), kwargs = {})
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp6 = tl.load(in_ptr0 + (32 + x0 + (8*((x0 % 4) // 4))), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (48 + x0 + (8*((x0 % 4) // 4))), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (0))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (x0 + (8*((x0 % 4) // 4)) + (16*x1)), xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 2, tl.int32)
tmp5 = tmp1 == tmp4
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp11 = tmp8 / tmp10
tmp12 = tl.where(tmp3, tmp11, tmp8)
tmp13 = tmp0 == tmp4
tmp15 = tl.where(tmp13, tmp6, tmp14)
tmp16 = tl.where(tmp2, tmp11, tmp15)
tmp17 = tl.where(tmp2, tmp12, tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, itruediv], Original ATen: [aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_sum_0.run(arg0_1, buf1, 1, 16, grid=grid(1), stream=stream0)
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [itruediv], Original ATen: [aten.div]
triton_poi_fused_div_1.run(buf1, arg0_1, buf2, 64, grid=grid(64), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf5 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sum_2, sum_3, itruediv_2], Original ATen: [aten.sum, aten.div]
triton_per_fused_div_sum_2.run(buf2, buf3, buf5, 1, 16, grid=grid(1), stream=stream0)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [itruediv_2], Original ATen: [aten.div]
triton_poi_fused_div_3.run(buf5, buf2, buf3, buf6, 64, grid=grid(64), stream=stream0)
del buf5
buf7 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [sum_4], Original ATen: [aten.sum]
triton_per_fused_sum_4.run(buf6, buf7, 1, 16, grid=grid(1), stream=stream0)
buf8 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(buf6, buf7, buf8, 64, grid=grid(64), stream=stream0)
del buf6
del buf7
return (reinterpret_tensor(buf8, (4, 1, 4, 4), (16, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SpatialAttn(nn.Module):
"""Spatial Attention Layer"""
def __init__(self):
super(SpatialAttn, self).__init__()
def forward(self, x):
x = x.mean(1, keepdim=True)
h = x.size(2)
w = x.size(3)
x = x.view(x.size(0), -1)
        z = x  # z aliases x (no copy), so the in-place division below also mutates x
for b in range(x.size(0)):
z[b] /= torch.sum(z[b])
z = z.view(x.size(0), 1, h, w)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_sum_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (16 + r0), None)
tmp3 = tl.load(in_ptr0 + (32 + r0), None)
tmp5 = tl.load(in_ptr0 + (48 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp8 / tmp11
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None)
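# This first reduction handles batch 0 only: tmp8 is its channel mean (four loads at
# stride 16) and tmp12 divides that map by its spatial sum, i.e. the eager z[0] /= z[0].sum().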
@triton.jit
def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = 4.0
tmp12 = tmp10 / tmp11
tmp13 = tl.where(tmp2, tmp3, tmp12)
tl.store(out_ptr0 + x2, tmp13, xmask)
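# select_scatter materialized: row x1 == 0 takes the normalized batch-0 map from the
# previous kernel, while rows 1-3 recompute the raw channel mean, reproducing the
# intermediate state after the b = 0 loop iteration.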
@triton.jit
def triton_per_fused_div_sum_2(in_ptr0, out_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp3 = tl.load(in_ptr0 + (r0 + 4 * (r0 % 4 // 4)), None)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 4 * (r0 % 4 // 4)), None)
tmp12 = tl.load(in_ptr0 + (r0 + 8 * (r0 % 4 // 4)), None)
tmp13 = tl.load(in_ptr0 + (16 + r0 + 8 * (r0 % 4 // 4)), None)
tmp18 = tl.load(in_ptr0 + (32 + r0 + 8 * (r0 % 4 // 4)), None)
tmp0 = tl.full([1, 1], 1, tl.int32)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tl.full([1, 1], 2, tl.int32)
tmp10 = tmp9 == tmp0
tmp11 = tmp0 == tmp0
tmp14 = tl.where(tmp2, tmp12, tmp13)
tmp15 = tmp14 / tmp8
tmp16 = tl.where(tmp11, tmp15, tmp14)
tmp17 = tmp9 == tmp1
tmp19 = tl.where(tmp17, tmp12, tmp18)
tmp20 = tl.where(tmp10, tmp15, tmp19)
tmp21 = tl.where(tmp10, tmp16, tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = tmp21 / tmp24
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp25, None)
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
@triton.jit
def triton_poi_fused_div_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (16 + x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + 0)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr1 + (x0 + 8 * (x0 % 4 // 4) + 16 * x1), xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 1, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = tmp4 == tmp4
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp4 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp14 = tmp11 / tmp13
tmp15 = tl.where(tmp6, tmp14, tmp11)
tmp16 = tmp0 == tmp7
tmp18 = tl.where(tmp16, tmp9, tmp17)
tmp19 = tl.where(tmp5, tmp14, tmp18)
tmp20 = tl.where(tmp5, tmp15, tmp19)
tmp21 = tl.where(tmp2, tmp3, tmp20)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_per_fused_sum_4(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp3 = tl.load(in_ptr0 + (32 + r0 + 4 * (r0 % 4 // 4)), None)
tmp4 = tl.load(in_ptr0 + (48 + r0 + 4 * (r0 % 4 // 4)), None)
tmp0 = tl.full([1, 1], 3, tl.int32)
tmp1 = tl.full([1, 1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp6 = tl.load(in_ptr0 + (32 + x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (48 + x0 + 8 * (x0 % 4 // 4)), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (x0 + 8 * (x0 % 4 // 4) + 16 * x1), xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp4 = tl.full([1], 2, tl.int32)
tmp5 = tmp1 == tmp4
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp11 = tmp8 / tmp10
tmp12 = tl.where(tmp3, tmp11, tmp8)
tmp13 = tmp0 == tmp4
tmp15 = tl.where(tmp13, tmp6, tmp14)
tmp16 = tl.where(tmp2, tmp11, tmp15)
tmp17 = tl.where(tmp2, tmp12, tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((16,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_div_sum_0[grid(1)](arg0_1, buf1, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_div_1[grid(64)](buf1, arg0_1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf5 = buf1
del buf1
triton_per_fused_div_sum_2[grid(1)](buf2, buf3, buf5, 1, 16, XBLOCK
=1, num_warps=2, num_stages=1)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_div_3[grid(64)](buf5, buf2, buf3, buf6, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del buf5
buf7 = buf3
del buf3
triton_per_fused_sum_4[grid(1)](buf6, buf7, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf8 = buf2
del buf2
triton_poi_fused_5[grid(64)](buf6, buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf6
del buf7
return reinterpret_tensor(buf8, (4, 1, 4, 4), (16, 16, 4, 1), 0),
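# The eager per-batch loop was unrolled at trace time: each reduction/scatter kernel pair
# above applies z[b] /= z[b].sum() for the next batch index, and buf8 is viewed back to
# (4, 1, 4, 4) on return.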
class SpatialAttnNew(nn.Module):
"""Spatial Attention Layer"""
def __init__(self):
super(SpatialAttnNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| wangminjie920705/Part-reid | SpatialAttn | false | 4,520 | [
"MIT"
] | 0 | 34a1e968a2eab692ba810332f309e82b441793f6 | https://github.com/wangminjie920705/Part-reid/tree/34a1e968a2eab692ba810332f309e82b441793f6 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Spatial Attention Layer"""
def __init__(self):
super().__init__()
def forward(self, x):
x = x.mean(1, keepdim=True)
h = x.size(2)
w = x.size(3)
x = x.view(x.size(0), -1)
        z = x  # z aliases x (no copy), so the in-place division below also mutates x
for b in range(x.size(0)):
z[b] /= torch.sum(z[b])
z = z.view(x.size(0), 1, h, w)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
FocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [cross_entropy_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy_1 => amax_1, sub_2
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kh/ckh5gimqiqdkf7nauozqrdjc2zt322h57wtvsnk75mdfe6t6pqvl.py
# Topologically Sorted Source Nodes: [cross_entropy_1, logpt, pt, sub, pow_1, neg_1, focal_loss, balanced_focal_loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.exp, aten.rsub, aten.pow]
# Source node to ATen node mapping:
# balanced_focal_loss => mul_3
# cross_entropy_1 => div_1, exp_1, log_2, mul_1, neg_1, sub_3, sum_3, sum_4
# focal_loss => mul_2
# logpt => neg_2
# neg_1 => neg_3
# pow_1 => pow_1
# pt => exp_2
# sub => sub_4
# Graph fragment:
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_3,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %log_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %arg0_1), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_4,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg_1, 64), kwargs = {})
# %neg_2 : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%div_1,), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_2,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_4, 2), kwargs = {})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_3, %neg_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, 0.25), kwargs = {})
triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = -tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = 1.0
tmp25 = tmp24 - tmp23
tmp26 = tmp25 * tmp25
tmp27 = -tmp26
tmp28 = tmp27 * tmp22
tmp29 = 0.25
tmp30 = tmp28 * tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp30, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy_1], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [cross_entropy_1, logpt, pt, sub, pow_1, neg_1, focal_loss, balanced_focal_loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.exp, aten.rsub, aten.pow]
triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1.run(buf2, buf0, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
def __init__(self, focusing_param=2, balance_param=0.25):
super(FocalLoss, self).__init__()
self.focusing_param = focusing_param
self.balance_param = balance_param
def forward(self, output, target):
cross_entropy = F.cross_entropy(output, target)
        torch.log(cross_entropy)  # result is discarded (kept verbatim from the source repo); it has no effect on the loss
logpt = -F.cross_entropy(output, target)
pt = torch.exp(logpt)
focal_loss = -(1 - pt) ** self.focusing_param * logpt
balanced_focal_loss = self.balance_param * focal_loss
return balanced_focal_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
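# Numerically stable log-softmax, pass 1: subtract the per-position maximum over the four
# channel slices (stride 16); the log-sum-exp term is fused into the reduction kernel below.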
@triton.jit
def triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tmp22 = -tmp21
tmp23 = tl_math.exp(tmp22)
tmp24 = 1.0
tmp25 = tmp24 - tmp23
tmp26 = tmp25 * tmp25
tmp27 = -tmp26
tmp28 = tmp27 * tmp22
tmp29 = 0.25
tmp30 = tmp28 * tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp30, None)
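# tmp20 = 0.015625 is the traced 1/64 (cross-entropy mean over N*H*W = 4*4*4 positions),
# tmp22 = logpt, tmp23 = pt = exp(logpt), and tmp30 = 0.25 * (-(1 - pt)**2 * logpt):
# the entire balanced focal loss collapsed into one fused reduction.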
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_exp_mul_neg_pow_rsub_sum_1[grid(1)](
buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf2,
class FocalLossNew(nn.Module):
def __init__(self, focusing_param=2, balance_param=0.25):
super(FocalLossNew, self).__init__()
self.focusing_param = focusing_param
self.balance_param = balance_param
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
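# Note: the trace bakes in focusing_param=2 (the squared (1 - pt) factor, tmp26) and
# balance_param=0.25 (tmp29), so constructor arguments passed to FocalLossNew do not
# alter the compiled kernel.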
| wanghao15536870732/plants_disease_classify | FocalLoss | false | 4,521 | [
"Apache-2.0"
] | 0 | 6d0d1d39f0ec15fc2bd523142c5c403a1577da84 | https://github.com/wanghao15536870732/plants_disease_classify/tree/6d0d1d39f0ec15fc2bd523142c5c403a1577da84 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, focusing_param=2, balance_param=0.25):
super().__init__()
self.focusing_param = focusing_param
self.balance_param = balance_param
def forward(self, output, target):
cross_entropy = F.cross_entropy(output, target)
        torch.log(cross_entropy)  # result is discarded (kept verbatim from the source repo); it has no effect on the loss
logpt = -F.cross_entropy(output, target)
pt = torch.exp(logpt)
focal_loss = -(1 - pt) ** self.focusing_param * logpt
balanced_focal_loss = self.balance_param * focal_loss
return balanced_focal_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SigmoidRange | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ua/cuauaagx42oag7oz2h5z2jowntv7q2icciobr7bfvnumylyzkted.py
# Topologically Sorted Source Nodes: [sigmoid, mul, add], Original ATen: [aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, 0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 4), kwargs = {})
triton_poi_fused_add_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
    tmp2 = 0.0  # (high - low) folded to a constant, since low == high == 4 at init
    tmp3 = tmp1 * tmp2
    tmp4 = 4.0  # low
    tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul, add], Original ATen: [aten.sigmoid, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
def sigmoid_range(x, low, high):
"""Sigmoid function with range `(low, high)`"""
return torch.sigmoid(x) * (high - low) + low
class SigmoidRange(nn.Module):
"""Sigmoid module with range `(low,x_max)`"""
def __init__(self, low, high):
super().__init__()
self.low, self.high = low, high
def forward(self, x):
return sigmoid_range(x, self.low, self.high)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'low': 4, 'high': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.0
tmp3 = tmp1 * tmp2
tmp4 = 4.0
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def sigmoid_range(x, low, high):
"""Sigmoid function with range `(low, high)`"""
return torch.sigmoid(x) * (high - low) + low
class SigmoidRangeNew(nn.Module):
"""Sigmoid module with range `(low,x_max)`"""
def __init__(self, low, high):
super().__init__()
self.low, self.high = low, high
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| wegroupwolves/fastai | SigmoidRange | false | 4,522 | [
"Apache-2.0"
] | 0 | df40df403e05e132411f0f7abc7ec33c86e58bb9 | https://github.com/wegroupwolves/fastai/tree/df40df403e05e132411f0f7abc7ec33c86e58bb9 | import torch
from torch import nn
def sigmoid_range(x, low, high):
"""Sigmoid function with range `(low, high)`"""
return torch.sigmoid(x) * (high - low) + low
class Model(nn.Module):
"""Sigmoid module with range `(low,x_max)`"""
def __init__(self, low, high):
super().__init__()
self.low, self.high = low, high
def forward(self, x):
return sigmoid_range(x, self.low, self.high)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
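# Note: with low == high == 4 as above, (high - low) is 0, which is why the
# compiled kernel folds the output to the constant 4.0. A sketch with distinct
# bounds (not from the source repo), e.g. squashing predictions into (0, 5):
if __name__ == '__main__':
    m = Model(0, 5)
    y = m(torch.randn(8))
    assert (y > 0).all() and (y < 5).all()  # outputs lie strictly inside (0, 5)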
|
TemporalRelation | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/4m/c4mb3evaolfrjioe7mq5eta334htzcfywer2x4df2y4knwf5l2pc.py
# Topologically Sorted Source Nodes: [relation_feature, truediv, add_4], Original ATen: [aten.stack, aten.div, aten.add]
# Source node to ATen node mapping:
# add_4 => add_4
# relation_feature => cat
# truediv => div
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%add, %add_1, %add_2, %add_3],), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_2), kwargs = {})
triton_poi_fused_add_div_stack_0 = async_compile.triton('triton_poi_fused_add_div_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_stack_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_stack_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x0 = xindex % 16
x2 = xindex
tmp41 = tl.load(in_ptr1 + (x2), xmask)
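    # x1 in [0, 16) indexes the stacked (timestep, batch) rows; the four range
    # checks below reproduce torch.stack of (prev + next) for t = 0..3, with
    # prev clamped at t = 0 and next clamped at t = 3 (the sequence boundaries).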
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (64 + x0 + (16*x1)), tmp4 & xmask, other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1))), tmp13 & xmask, other=0.0)
tmp15 = tl.load(in_ptr0 + (128 + x0 + (16*((-4) + x1))), tmp13 & xmask, other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 12, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr0 + (64 + x0 + (16*((-8) + x1))), tmp22 & xmask, other=0.0)
tmp24 = tl.load(in_ptr0 + (192 + x0 + (16*((-8) + x1))), tmp22 & xmask, other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tmp29 = tl.full([1], 16, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tl.load(in_ptr0 + (128 + x0 + (16*((-12) + x1))), tmp28 & xmask, other=0.0)
tmp32 = tl.load(in_ptr0 + (192 + x0 + (16*((-12) + x1))), tmp28 & xmask, other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tmp39 = 0.5
tmp40 = tmp38 * tmp39
tmp42 = tmp40 + tmp41
tl.store(in_out_ptr0 + (x2), tmp42, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_feats], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [relation_feature, truediv, add_4], Original ATen: [aten.stack, aten.div, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_stack_0.run(buf2, buf0, primals_2, 256, grid=grid(256), stream=stream0)
del buf0
return (buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class TemporalRelation(nn.Module):
def __init__(self, feat_dim, time_window=1):
super(TemporalRelation, self).__init__()
self.time_window = time_window
self.feat_dim = feat_dim
self.WT = nn.Linear(self.feat_dim, self.feat_dim, bias=False)
def forward(self, feats):
relation_feature = []
att_feats = self.WT(feats)
for t in range(0, att_feats.size()[0], 1):
if t < self.time_window:
prev = att_feats[0, :, :]
else:
prev = att_feats[t - 1, :, :]
if t == att_feats.size()[0] - 1:
next = att_feats[t, :, :]
else:
next = att_feats[t + 1, :, :]
relation_feature.append(prev + next)
relation_feature = torch.stack(relation_feature, dim=0)
return relation_feature / 2 + feats
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'feat_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_stack_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x0 = xindex % 16
x2 = xindex
tmp41 = tl.load(in_ptr1 + x2, xmask)
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (64 + x0 + 16 * x1), tmp4 & xmask, other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1)), tmp13 & xmask, other=0.0)
tmp15 = tl.load(in_ptr0 + (128 + x0 + 16 * (-4 + x1)), tmp13 & xmask,
other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 12, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr0 + (64 + x0 + 16 * (-8 + x1)), tmp22 & xmask,
other=0.0)
tmp24 = tl.load(in_ptr0 + (192 + x0 + 16 * (-8 + x1)), tmp22 & xmask,
other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tl.full([1], 16, tl.int64)
tmp31 = tl.load(in_ptr0 + (128 + x0 + 16 * (-12 + x1)), tmp28 & xmask,
other=0.0)
tmp32 = tl.load(in_ptr0 + (192 + x0 + 16 * (-12 + x1)), tmp28 & xmask,
other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp28, tmp33, tmp34)
tmp36 = tl.where(tmp22, tmp27, tmp35)
tmp37 = tl.where(tmp13, tmp18, tmp36)
tmp38 = tl.where(tmp4, tmp9, tmp37)
tmp39 = 0.5
tmp40 = tmp38 * tmp39
tmp42 = tmp40 + tmp41
tl.store(in_out_ptr0 + x2, tmp42, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused_add_div_stack_0[grid(256)](buf2, buf0, primals_2,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0)
class TemporalRelationNew(nn.Module):
def __init__(self, feat_dim, time_window=1):
super(TemporalRelationNew, self).__init__()
self.time_window = time_window
self.feat_dim = feat_dim
self.WT = nn.Linear(self.feat_dim, self.feat_dim, bias=False)
def forward(self, input_0):
primals_1 = self.WT.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| weiyi1991/UA_Concurrent | TemporalRelation | false | 4,523 | [
"MIT"
] | 0 | 11238c778c60095abf326800d6e6a13a643bf071 | https://github.com/weiyi1991/UA_Concurrent/tree/11238c778c60095abf326800d6e6a13a643bf071 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, feat_dim, time_window=1):
super().__init__()
self.time_window = time_window
self.feat_dim = feat_dim
self.WT = nn.Linear(self.feat_dim, self.feat_dim, bias=False)
def forward(self, feats):
relation_feature = []
att_feats = self.WT(feats)
for t in range(0, att_feats.size()[0], 1):
if t < self.time_window:
prev = att_feats[0, :, :]
else:
prev = att_feats[t - 1, :, :]
if t == att_feats.size()[0] - 1:
next = att_feats[t, :, :]
else:
next = att_feats[t + 1, :, :]
relation_feature.append(prev + next)
relation_feature = torch.stack(relation_feature, dim=0)
return relation_feature / 2 + feats
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
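# A minimal sketch (assumption: feats is laid out [T, ..., feat_dim] and
# iterated over dim 0; each output step is (prev + next) / 2 plus a residual):
if __name__ == '__main__':
    model = Model(4)
    feats = torch.rand(4, 4, 4, 4)
    out = model(feats)
    print(out.shape)  # torch.Size([4, 4, 4, 4])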
|
LinearNormalGamma | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/le/clerp2w6xawitag3uutlvkacli4mlweewqyozrmudnmz57qhywvk.py
# Topologically Sorted Source Nodes: [exp, add, log], Original ATen: [aten.exp, aten.add, aten.log]
# Source node to ATen node mapping:
# add => add
# exp => exp
# log => log
# Graph fragment:
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%squeeze_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
triton_poi_fused_add_exp_log_0 = async_compile.triton('triton_poi_fused_add_exp_log_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_log_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_log_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tl.store(out_ptr0 + (x0), tmp1, xmask)
tl.store(out_ptr1 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/i4/ci4paqoe5byi3o7n4kztkuwnt5udazlxiowcgyjplzgzjjmmuy6f.py
# Topologically Sorted Source Nodes: [exp_1, add_1, log_1, add_2], Original ATen: [aten.exp, aten.add, aten.log]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# exp_1 => exp_1
# log_1 => log_1
# Graph fragment:
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%squeeze_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, 1), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_1, 1), kwargs = {})
triton_poi_fused_add_exp_log_1 = async_compile.triton('triton_poi_fused_add_exp_log_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_log_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_log_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = tmp4 + tmp2
tl.store(out_ptr0 + (x0), tmp1, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rl/crlkvt5stdc3g6nx4yvdiouthpmml7xxprrt3ima4zn536sjnzj4.py
# Topologically Sorted Source Nodes: [exp_2, add_3, log_2], Original ATen: [aten.exp, aten.add, aten.log]
# Source node to ATen node mapping:
# add_3 => add_3
# exp_2 => exp_2
# log_2 => log_2
# Graph fragment:
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%squeeze_3,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_2, 1), kwargs = {})
# %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_3,), kwargs = {})
triton_poi_fused_add_exp_log_2 = async_compile.triton('triton_poi_fused_add_exp_log_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_log_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_log_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tl.store(out_ptr0 + (x0), tmp1, xmask)
tl.store(out_ptr1 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf2 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, add, log], Original ATen: [aten.exp, aten.add, aten.log]
stream0 = get_raw_stream(0)
triton_poi_fused_add_exp_log_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp_1, add_1, log_1, add_2], Original ATen: [aten.exp, aten.add, aten.log]
triton_poi_fused_add_exp_log_1.run(buf0, buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf6 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp_2, add_3, log_2], Original ATen: [aten.exp, aten.add, aten.log]
triton_poi_fused_add_exp_log_2.run(buf0, buf5, buf6, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf0, (4, 64), (256, 4), 0), buf2, buf4, buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class LinearNormalGamma(nn.Module):
def __init__(self, in_chanels, out_channels):
super().__init__()
self.linear = nn.Linear(in_chanels, out_channels * 4)
def evidence(self, x):
return torch.log(torch.exp(x) + 1)
def forward(self, x):
pred = self.linear(x).view(x.shape[0], -1, 4)
mu, logv, logalpha, logbeta = [w.squeeze(-1) for w in torch.split(
pred, 1, dim=-1)]
return mu, self.evidence(logv), self.evidence(logalpha
) + 1, self.evidence(logbeta)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chanels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_exp_log_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_exp_log_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp5 = tmp4 + tmp2
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_exp_log_2(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tl.store(out_ptr0 + x0, tmp1, xmask)
tl.store(out_ptr1 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf2 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_exp_log_0[grid(256)](buf0, buf1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
triton_poi_fused_add_exp_log_1[grid(256)](buf0, buf3, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf6 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
triton_poi_fused_add_exp_log_2[grid(256)](buf0, buf5, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return reinterpret_tensor(buf0, (4, 64), (256, 4), 0
), buf2, buf4, buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf5
class LinearNormalGammaNew(nn.Module):
def __init__(self, in_chanels, out_channels):
super().__init__()
self.linear = nn.Linear(in_chanels, out_channels * 4)
def evidence(self, x):
return torch.log(torch.exp(x) + 1)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1], output[2], output[3]
| wanzysky/evidential-deep-learning | LinearNormalGamma | false | 4,524 | [
"Apache-2.0"
] | 0 | 71ebd59ab3a4b66c38d919e8aa9ad3711a416796 | https://github.com/wanzysky/evidential-deep-learning/tree/71ebd59ab3a4b66c38d919e8aa9ad3711a416796 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, in_chanels, out_channels):
super().__init__()
self.linear = nn.Linear(in_chanels, out_channels * 4)
def evidence(self, x):
return torch.log(torch.exp(x) + 1)
def forward(self, x):
pred = self.linear(x).view(x.shape[0], -1, 4)
mu, logv, logalpha, logbeta = [w.squeeze(-1) for w in torch.split(
pred, 1, dim=-1)]
return mu, self.evidence(logv), self.evidence(logalpha
) + 1, self.evidence(logbeta)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
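# The evidence transform log(exp(x) + 1) is softplus and can overflow for
# large x; torch.nn.functional.softplus is the numerically stable equivalent.
# A quick shape/positivity check of the four heads (not from the source repo):
if __name__ == '__main__':
    model = Model(4, 4)
    mu, v, alpha, beta = model(torch.rand(4, 4, 4, 4))
    print(mu.shape, (v > 0).all(), (alpha > 1).all(), (beta > 0).all())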
|
GMM_Module | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lw/clwpu4p6dl3z7eu4rihzzk7qnbodnnk5vqizfu2obbx6aipf3hap.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 16
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pn/cpnp26jw3ebmdlxqqo32ld5ldpctmkb3so3vjnso6aa7xfiznrgs.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hf/chfyhjvgskzitnauhm26uucgw6rkkhp5yurmxoagmrxc43zifwof.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 48
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (48, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (48, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 1024, grid=grid(1024), stream=stream0)
del buf0
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
buf4 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 2048, grid=grid(2048), stream=stream0)
del buf3
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 48, 4, 4), (768, 16, 4, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf7, primals_7, 3072, grid=grid(3072), stream=stream0)
del primals_7
return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((48, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.utils.data
class GMM_Module(nn.Module):
"""
GMM Module
"""
def __init__(self, out_channel_M, k):
super(GMM_Module, self).__init__()
self.conv1 = nn.Conv2d(int(out_channel_M), k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv1.weight.data, math.sqrt(2 *
1 * (k + 1) / (1 + 1)))
torch.nn.init.constant_(self.conv1.bias.data, 0.01)
self.lrelu_1 = nn.LeakyReLU()
self.conv2 = nn.Conv2d(k * out_channel_M, 2 * k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv2.weight.data, math.sqrt(2 *
1 * (k + 2 * k) / (k + k)))
torch.nn.init.constant_(self.conv2.bias.data, 0.01)
self.lrelu_2 = nn.LeakyReLU()
self.conv3 = nn.Conv2d(2 * k * out_channel_M, 3 * k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv3.weight.data, math.sqrt(2 *
1 * (2 * k + 3 * k) / (2 * k + 2 * k)))
torch.nn.init.constant_(self.conv3.bias.data, 0.01)
def forward(self, input):
x = self.lrelu_1(self.conv1(input))
x = self.lrelu_2(self.conv2(x))
return self.conv3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'out_channel_M': 4, 'k': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 48
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (48, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (48,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(1024)](buf0,
primals_2, buf1, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
buf4 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32
)
triton_poi_fused_convolution_leaky_relu_1[grid(2048)](buf3,
primals_5, buf4, buf5, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 48, 4, 4), (768, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_2[grid(3072)](buf7, primals_7, 3072,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf4, buf5)
class GMM_ModuleNew(nn.Module):
"""
GMM Module
"""
def __init__(self, out_channel_M, k):
super(GMM_ModuleNew, self).__init__()
self.conv1 = nn.Conv2d(int(out_channel_M), k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv1.weight.data, math.sqrt(2 *
1 * (k + 1) / (1 + 1)))
torch.nn.init.constant_(self.conv1.bias.data, 0.01)
self.lrelu_1 = nn.LeakyReLU()
self.conv2 = nn.Conv2d(k * out_channel_M, 2 * k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv2.weight.data, math.sqrt(2 *
1 * (k + 2 * k) / (k + k)))
torch.nn.init.constant_(self.conv2.bias.data, 0.01)
self.lrelu_2 = nn.LeakyReLU()
self.conv3 = nn.Conv2d(2 * k * out_channel_M, 3 * k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv3.weight.data, math.sqrt(2 *
1 * (2 * k + 3 * k) / (2 * k + 2 * k)))
torch.nn.init.constant_(self.conv3.bias.data, 0.01)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| wemozj/Image-Compression-based-GMM-and-Attention-Module | GMM_Module | false | 4,525 | [
"Apache-2.0"
] | 0 | 93f804dbcea8ffc1621456f3d104d0342c75373b | https://github.com/wemozj/Image-Compression-based-GMM-and-Attention-Module/tree/93f804dbcea8ffc1621456f3d104d0342c75373b | import math
import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""
GMM Module
"""
def __init__(self, out_channel_M, k):
super().__init__()
self.conv1 = nn.Conv2d(int(out_channel_M), k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv1.weight.data, math.sqrt(2 *
1 * (k + 1) / (1 + 1)))
torch.nn.init.constant_(self.conv1.bias.data, 0.01)
self.lrelu_1 = nn.LeakyReLU()
self.conv2 = nn.Conv2d(k * out_channel_M, 2 * k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv2.weight.data, math.sqrt(2 *
1 * (k + 2 * k) / (k + k)))
torch.nn.init.constant_(self.conv2.bias.data, 0.01)
self.lrelu_2 = nn.LeakyReLU()
self.conv3 = nn.Conv2d(2 * k * out_channel_M, 3 * k * out_channel_M,
kernel_size=1)
torch.nn.init.xavier_normal_(self.conv3.weight.data, math.sqrt(2 *
1 * (2 * k + 3 * k) / (2 * k + 2 * k)))
torch.nn.init.constant_(self.conv3.bias.data, 0.01)
def forward(self, input):
x = self.lrelu_1(self.conv1(input))
x = self.lrelu_2(self.conv2(x))
return self.conv3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
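# A minimal sketch (interpretation, not stated in the source repo): with k
# mixture components per latent channel, the 1x1-conv head widens M -> kM ->
# 2kM -> 3kM channels, i.e. k (weight, mean, scale) triplets per channel for
# a Gaussian mixture entropy model.
if __name__ == '__main__':
    M, k = 4, 4
    head = Model(M, k)
    params = head(torch.rand(2, M, 8, 8))
    print(params.shape)  # torch.Size([2, 48, 8, 8]) == (2, 3*k*M, 8, 8)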
|
MobileViTv2Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ud/cudbnoor4dpxxdmeqssolgpxcs76nh6jnfj2xf7auyk3sidsohnf.py
# Topologically Sorted Source Nodes: [weight_i], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weight_i => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
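# Reference sketch (hypothetical helper, not emitted by Inductor): an eager
# equivalent of triton_poi_fused__softmax_0 above. Subtracting the per-column
# max before exp is the standard numerically stable softmax trick; the next
# kernel divides by the column sum.
def _softmax_numerator_reference(i):
    # i: (4, 4, 4, 1) tensor, softmax taken over dim=1
    return torch.exp(i - i.amax(dim=1, keepdim=True))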
# kernel path: runs/run_shard_7/inductor_cache/sx/csxi2lkjrgsyc43k5vauxutocx5sik4gnrglbjfjmvytt3alfq7w.py
# Topologically Sorted Source Nodes: [weight_i], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weight_i => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
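# Reference sketch (hypothetical helper, not emitted by Inductor): the second
# softmax pass simply normalizes the exponentials from kernel 0 by their sum
# along dim=1.
def _softmax_denominator_reference(numerator):
    return numerator / numerator.sum(dim=1, keepdim=True)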
# kernel path: runs/run_shard_7/inductor_cache/7y/c7yntgucdgdvliykxh2nieonoltkmctmfwt43tccrxhhdctkmemc.py
# Topologically Sorted Source Nodes: [weight_i, context_score, context_vector], Original ATen: [aten._softmax, aten.mul, aten.sum]
# Source node to ATen node mapping:
# context_score => mul
# context_vector => sum_2
# weight_i => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view_3), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {})
triton_poi_fused__softmax_mul_sum_2 = async_compile.triton('triton_poi_fused__softmax_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3 + (64*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x3 + (64*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x3 + (64*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x3 + (64*x2)), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x4), tmp14, xmask)
''', device_str='cuda')
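# Reference sketch (hypothetical helper): eager form of the fused kernel
# above. The softmax weights broadcast against the key projection and the
# weighted keys are reduced over the token dimension, matching
# torch.sum(weight_i * k, dim=1, keepdim=True) in the reference module.
def _context_vector_reference(weight_i, k):
    return (weight_i * k).sum(dim=1, keepdim=True)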
# kernel path: runs/run_shard_7/inductor_cache/xq/cxq3lj6wku5vfttdnwphylbql6ikefvc2ohuycd2fdmo25bzfftu.py
# Topologically Sorted Source Nodes: [weight_i, context_score, context_vector, v], Original ATen: [aten._softmax, aten.mul, aten.sum]
# Source node to ATen node mapping:
# context_score => mul
# context_vector => sum_2
# v => mul_1
# weight_i => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view_3), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %sum_2), kwargs = {})
triton_poi_fused__softmax_mul_sum_3 = async_compile.triton('triton_poi_fused__softmax_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
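# Reference sketch (hypothetical helper): the last fused kernel is a plain
# broadcasted multiply; the single context vector scales every row of the
# value projection.
def _weighted_value_reference(v, context_vector):
    return v * context_vector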
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [i], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [weight_i], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_4
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [weight_i], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf4, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_6
del primals_7
buf6 = reinterpret_tensor(buf2, (4, 1, 4, 4), (16, 64, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [weight_i, context_score, context_vector], Original ATen: [aten._softmax, aten.mul, aten.sum]
triton_poi_fused__softmax_mul_sum_2.run(buf4, buf3, buf6, 64, grid=grid(64), stream=stream0)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight_i, context_score, context_vector, v], Original ATen: [aten._softmax, aten.mul, aten.sum]
triton_poi_fused__softmax_mul_sum_3.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf6
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_9
return (reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import init
class MobileViTv2Attention(nn.Module):
"""
    Separable self-attention from MobileViTv2 (linear complexity in sequence
    length; not classical scaled dot-product attention)
"""
def __init__(self, d_model):
"""
        :param d_model: Output dimensionality of the model; the module is
            single-headed, so this is also the key and value dimensionality
"""
super(MobileViTv2Attention, self).__init__()
self.fc_i = nn.Linear(d_model, 1)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
self.fc_o = nn.Linear(d_model, d_model)
self.d_model = d_model
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, input):
"""
        Computes separable self-attention over the token dimension (dim=1).
        :param input: Input tensor (b_s, nq, d_model)
        :return: Output tensor (b_s, nq, d_model)
"""
i = self.fc_i(input)
weight_i = torch.softmax(i, dim=1)
context_score = weight_i * self.fc_k(input)
context_vector = torch.sum(context_score, dim=1, keepdim=True)
v = self.fc_v(input) * context_vector
out = self.fc_o(v)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x4, tmp14, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf3)
del primals_4
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf2, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf5)
del primals_6
del primals_7
buf6 = reinterpret_tensor(buf2, (4, 1, 4, 4), (16, 64, 4, 1), 0)
del buf2
triton_poi_fused__softmax_mul_sum_2[grid(64)](buf4, buf3, buf6, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_mul_sum_3[grid(256)](buf5, buf6, buf7,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf6
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf7, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_9
return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, buf5, reinterpret_tensor(buf7, (64, 4), (4, 1), 0
), primals_8
class MobileViTv2AttentionNew(nn.Module):
"""
    Separable self-attention from MobileViTv2 (linear complexity in sequence
    length; not classical scaled dot-product attention)
"""
def __init__(self, d_model):
"""
        :param d_model: Output dimensionality of the model; the module is
            single-headed, so this is also the key and value dimensionality
"""
super(MobileViTv2AttentionNew, self).__init__()
self.fc_i = nn.Linear(d_model, 1)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
self.fc_o = nn.Linear(d_model, d_model)
self.d_model = d_model
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, input_0):
primals_1 = self.fc_i.weight
primals_2 = self.fc_i.bias
primals_4 = self.fc_k.weight
primals_5 = self.fc_k.bias
primals_6 = self.fc_v.weight
primals_7 = self.fc_v.bias
primals_8 = self.fc_o.weight
primals_9 = self.fc_o.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| weihaoxie/External-Attention-pytorch | MobileViTv2Attention | false | 4,526 | [
"MIT"
] | 0 | 9bec70f4ed8dd858c815e9bad240ab2f95a91a9f | https://github.com/weihaoxie/External-Attention-pytorch/tree/9bec70f4ed8dd858c815e9bad240ab2f95a91a9f | import torch
from torch import nn
from torch.nn import init
class Model(nn.Module):
"""
    Separable self-attention from MobileViTv2 (linear complexity in sequence
    length; not classical scaled dot-product attention)
"""
def __init__(self, d_model):
"""
        :param d_model: Output dimensionality of the model; the module is
            single-headed, so this is also the key and value dimensionality
"""
super().__init__()
self.fc_i = nn.Linear(d_model, 1)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
self.fc_o = nn.Linear(d_model, d_model)
self.d_model = d_model
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, input):
"""
        Computes separable self-attention over the token dimension (dim=1).
        :param input: Input tensor (b_s, nq, d_model)
        :return: Output tensor (b_s, nq, d_model)
"""
i = self.fc_i(input)
weight_i = torch.softmax(i, dim=1)
context_score = weight_i * self.fc_k(input)
context_vector = torch.sum(context_score, dim=1, keepdim=True)
v = self.fc_v(input) * context_vector
out = self.fc_o(v)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
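def _example_usage():
    # Usage sketch (hypothetical helper): attention is taken over dim=1, and
    # the output keeps the input shape (b_s, nq, d_model) == (4, 4, 4, 4) here.
    model = Model(4)
    return model(torch.rand(4, 4, 4, 4)).shape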
|
BitEstimator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3b/c3bdm3d3443oq2lufzqzrryuzboofq3y5dx2ddbcghahimvz3qqp.py
# Topologically Sorted Source Nodes: [softplus, mul, x, tanh, tanh_1, mul_1, x_1, softplus_1, mul_2, x_2, tanh_2, tanh_3, mul_3, x_3, softplus_2, mul_4, x_4, tanh_4, tanh_5, mul_5, x_5, softplus_3, mul_6, add_6, sigmoid], Original ATen: [aten.softplus, aten.mul, aten.add, aten.tanh, aten.sigmoid]
# Source node to ATen node mapping:
# add_6 => add_6
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# sigmoid => sigmoid
# softplus => exp, gt, log1p, where
# softplus_1 => exp_1, gt_1, log1p_1, where_1
# softplus_2 => exp_2, gt_2, log1p_2, where_2
# softplus_3 => exp_3, gt_3, log1p_3, where_3
# tanh => tanh
# tanh_1 => tanh_1
# tanh_2 => tanh_2
# tanh_3 => tanh_3
# tanh_4 => tanh_4
# tanh_5 => tanh_5
# x => add
# x_1 => add_1
# x_2 => add_2
# x_3 => add_3
# x_4 => add_4
# x_5 => add_5
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_1, 20), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %primals_1, %log1p), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %where), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_4,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_5,), kwargs = {})
# %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_5, 20), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %primals_5, %log1p_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %where_1), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_6), kwargs = {})
# %tanh_2 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %tanh_3 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_7,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, %tanh_3), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_3), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_8,), kwargs = {})
# %log1p_2 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_8, 20), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %primals_8, %log1p_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %where_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_9), kwargs = {})
# %tanh_4 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_4,), kwargs = {})
# %tanh_5 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_10,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_4, %tanh_5), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_5), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_11,), kwargs = {})
# %log1p_3 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_3,), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_11, 20), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %primals_11, %log1p_3), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, %where_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_12), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_6,), kwargs = {})
triton_poi_fused_add_mul_sigmoid_softplus_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_softplus_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_softplus_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_softplus_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr7 + (x1), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr8 + (x1), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr9 + (x1), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr10 + (x1), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr11 + (x1), xmask, eviction_policy='evict_last')
tmp2 = 20.0
tmp3 = tmp1 > tmp2
tmp4 = tl_math.exp(tmp1)
tmp5 = libdevice.log1p(tmp4)
tmp6 = tl.where(tmp3, tmp1, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = libdevice.tanh(tmp9)
tmp12 = libdevice.tanh(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 > tmp2
tmp17 = tl_math.exp(tmp15)
tmp18 = libdevice.log1p(tmp17)
tmp19 = tl.where(tmp16, tmp15, tmp18)
tmp20 = tmp14 * tmp19
tmp22 = tmp20 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp25 = libdevice.tanh(tmp24)
tmp26 = tmp23 * tmp25
tmp27 = tmp22 + tmp26
tmp29 = tmp28 > tmp2
tmp30 = tl_math.exp(tmp28)
tmp31 = libdevice.log1p(tmp30)
tmp32 = tl.where(tmp29, tmp28, tmp31)
tmp33 = tmp27 * tmp32
tmp35 = tmp33 + tmp34
tmp36 = libdevice.tanh(tmp35)
tmp38 = libdevice.tanh(tmp37)
tmp39 = tmp36 * tmp38
tmp40 = tmp35 + tmp39
tmp42 = tmp41 > tmp2
tmp43 = tl_math.exp(tmp41)
tmp44 = libdevice.log1p(tmp43)
tmp45 = tl.where(tmp42, tmp41, tmp44)
tmp46 = tmp40 * tmp45
tmp48 = tmp46 + tmp47
tmp49 = tl.sigmoid(tmp48)
tl.store(in_out_ptr0 + (x3), tmp49, xmask)
''', device_str='cuda')
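# Reference sketch (hypothetical helper): the `tmp1 > 20.0` branch in the
# kernel above mirrors F.softplus's default threshold of 20, past which
# log1p(exp(x)) equals x to float precision, so x is returned directly.
# Note that eager torch.where evaluates both branches.
def _softplus_reference(h):
    return torch.where(h > 20.0, h, torch.log1p(torch.exp(h)))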
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_12, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [softplus, mul, x, tanh, tanh_1, mul_1, x_1, softplus_1, mul_2, x_2, tanh_2, tanh_3, mul_3, x_3, softplus_2, mul_4, x_4, tanh_4, tanh_5, mul_5, x_5, softplus_3, mul_6, add_6, sigmoid], Original ATen: [aten.softplus, aten.mul, aten.add, aten.tanh, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_softplus_tanh_0.run(buf1, primals_2, primals_1, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, 256, grid=grid(256), stream=stream0)
del primals_12
return (buf1, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Bitparm(nn.Module):
"""
    One layer of the learned cumulative distribution; stores per-channel
    scale (h), bias (b) and, unless final, gating (a) parameters
"""
def __init__(self, channel, final=False):
super(Bitparm, self).__init__()
self.final = final
self.h = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
self.b = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
if not final:
self.a = nn.Parameter(torch.nn.init.normal_(torch.empty(channel
).view(1, -1, 1, 1), 0, 0.01))
else:
self.a = None
def forward(self, x):
if self.final:
return torch.sigmoid(x * F.softplus(self.h) + self.b)
else:
x = x * F.softplus(self.h) + self.b
return x + torch.tanh(x) * torch.tanh(self.a)
class BitEstimator(nn.Module):
"""
    Estimates the cumulative distribution of latents with four stacked
    Bitparm layers; the result is used to compute their bit cost
"""
def __init__(self, channel):
super(BitEstimator, self).__init__()
self.f1 = Bitparm(channel)
self.f2 = Bitparm(channel)
self.f3 = Bitparm(channel)
self.f4 = Bitparm(channel, True)
def forward(self, x):
x = self.f1(x)
x = self.f2(x)
x = self.f3(x)
return self.f4(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sigmoid_softplus_tanh_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, in_ptr10, in_ptr11, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr11 + x1, xmask, eviction_policy='evict_last')
tmp2 = 20.0
tmp3 = tmp1 > tmp2
tmp4 = tl_math.exp(tmp1)
tmp5 = libdevice.log1p(tmp4)
tmp6 = tl.where(tmp3, tmp1, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = libdevice.tanh(tmp9)
tmp12 = libdevice.tanh(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 > tmp2
tmp17 = tl_math.exp(tmp15)
tmp18 = libdevice.log1p(tmp17)
tmp19 = tl.where(tmp16, tmp15, tmp18)
tmp20 = tmp14 * tmp19
tmp22 = tmp20 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp25 = libdevice.tanh(tmp24)
tmp26 = tmp23 * tmp25
tmp27 = tmp22 + tmp26
tmp29 = tmp28 > tmp2
tmp30 = tl_math.exp(tmp28)
tmp31 = libdevice.log1p(tmp30)
tmp32 = tl.where(tmp29, tmp28, tmp31)
tmp33 = tmp27 * tmp32
tmp35 = tmp33 + tmp34
tmp36 = libdevice.tanh(tmp35)
tmp38 = libdevice.tanh(tmp37)
tmp39 = tmp36 * tmp38
tmp40 = tmp35 + tmp39
tmp42 = tmp41 > tmp2
tmp43 = tl_math.exp(tmp41)
tmp44 = libdevice.log1p(tmp43)
tmp45 = tl.where(tmp42, tmp41, tmp44)
tmp46 = tmp40 * tmp45
tmp48 = tmp46 + tmp47
tmp49 = tl.sigmoid(tmp48)
tl.store(in_out_ptr0 + x3, tmp49, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_12, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_softplus_tanh_0[grid(256)](buf1,
primals_2, primals_1, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, primals_10,
primals_11, primals_12, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_12
return (buf1, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, primals_10, primals_11,
buf1)
class Bitparm(nn.Module):
"""
    One layer of the learned cumulative distribution; stores per-channel
    scale (h), bias (b) and, unless final, gating (a) parameters
"""
def __init__(self, channel, final=False):
super(Bitparm, self).__init__()
self.final = final
self.h = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
self.b = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
if not final:
self.a = nn.Parameter(torch.nn.init.normal_(torch.empty(channel
).view(1, -1, 1, 1), 0, 0.01))
else:
self.a = None
def forward(self, x):
if self.final:
return torch.sigmoid(x * F.softplus(self.h) + self.b)
else:
x = x * F.softplus(self.h) + self.b
return x + torch.tanh(x) * torch.tanh(self.a)
class BitEstimatorNew(nn.Module):
"""
    Estimates the cumulative distribution of latents with four stacked
    Bitparm layers; the result is used to compute their bit cost
"""
def __init__(self, channel):
super(BitEstimatorNew, self).__init__()
self.f1 = Bitparm(channel)
self.f2 = Bitparm(channel)
self.f3 = Bitparm(channel)
self.f4 = Bitparm(channel, True)
def forward(self, input_0):
primals_1 = self.f1.h
primals_3 = self.f1.b
primals_4 = self.f1.a
primals_5 = self.f2.h
primals_6 = self.f2.b
primals_7 = self.f2.a
primals_8 = self.f3.h
primals_9 = self.f3.b
primals_10 = self.f3.a
primals_11 = self.f4.h
primals_12 = self.f4.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| wemozj/Image-Compression-based-GMM-and-Attention-Module | BitEstimator | false | 4,527 | [
"Apache-2.0"
] | 0 | 93f804dbcea8ffc1621456f3d104d0342c75373b | https://github.com/wemozj/Image-Compression-based-GMM-and-Attention-Module/tree/93f804dbcea8ffc1621456f3d104d0342c75373b | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Bitparm(nn.Module):
"""
save params
"""
def __init__(self, channel, final=False):
super().__init__()
self.final = final
self.h = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
self.b = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
if not final:
self.a = nn.Parameter(torch.nn.init.normal_(torch.empty(channel
).view(1, -1, 1, 1), 0, 0.01))
else:
self.a = None
def forward(self, x):
if self.final:
return torch.sigmoid(x * F.softplus(self.h) + self.b)
else:
x = x * F.softplus(self.h) + self.b
return x + torch.tanh(x) * torch.tanh(self.a)
class Model(nn.Module):
"""
    Estimates the cumulative distribution of latents with four stacked
    Bitparm layers; the result is used to compute their bit cost
"""
def __init__(self, channel):
super().__init__()
self.f1 = Bitparm(channel)
self.f2 = Bitparm(channel)
self.f3 = Bitparm(channel)
self.f4 = Bitparm(channel, True)
def forward(self, x):
x = self.f1(x)
x = self.f2(x)
x = self.f3(x)
return self.f4(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
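def _example_usage():
    # Usage sketch (hypothetical helper): outputs are sigmoid CDF values in
    # (0, 1); in learned compression the bit cost is commonly derived from
    # CDF(x + 0.5) - CDF(x - 0.5) (an assumption about typical usage, not
    # shown in this file).
    model = Model(4)
    return model(torch.rand(4, 4, 4, 4))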
|
Prototype | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ms/cms4fv3omildjvpq2cgx3it3pwz5faljuuiat4qcyrcnq3rci2ej.py
# Topologically Sorted Source Nodes: [cdist], Original ATen: [aten._euclidean_dist]
# Source node to ATen node mapping:
# cdist => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %sum_1, %full_default], -1), kwargs = {})
triton_poi_fused__euclidean_dist_0 = async_compile.triton('triton_poi_fused__euclidean_dist_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__euclidean_dist_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__euclidean_dist_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = -2.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 5, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + (4*x1), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp14 * tmp14
tmp16 = tl.load(in_ptr0 + (1 + (4*x1)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tl.load(in_ptr0 + (2 + (4*x1)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tl.load(in_ptr0 + (3 + (4*x1)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp13, tmp24, tmp25)
tmp27 = tmp0 >= tmp11
tmp28 = tl.full([1], 6, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = 1.0
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp27, tmp30, tmp31)
tmp33 = tl.where(tmp13, tmp26, tmp32)
tmp34 = tl.where(tmp4, tmp9, tmp33)
tl.store(out_ptr0 + (x2), tmp34, xmask)
''', device_str='cuda')
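# Reference sketch (hypothetical helper): eager form of the kernel above,
# which builds the left operand of the squared-distance expansion
#   ||x - y||^2 = ||x||^2 - 2 * x.y + ||y||^2
# by concatenating [-2x, ||x||^2, 1] along the feature dimension.
def _cdist_x_augmented_reference(x):
    return torch.cat([x * -2.0, x.pow(2).sum(-1, keepdim=True),
                      torch.ones_like(x[..., :1])], dim=-1)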
# kernel path: runs/run_shard_7/inductor_cache/lg/clgibvy6k5yoz65ryyz35bjx6twpc4zxf7hpntq6tblfnwym3ufv.py
# Topologically Sorted Source Nodes: [cdist], Original ATen: [aten._euclidean_dist]
# Source node to ATen node mapping:
# cdist => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %full_default_1, %sum_2], -1), kwargs = {})
triton_poi_fused__euclidean_dist_1 = async_compile.triton('triton_poi_fused__euclidean_dist_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__euclidean_dist_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__euclidean_dist_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 24
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = 1.0
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tmp0 >= tmp7
tmp14 = tl.full([1], 6, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tl.load(in_ptr0 + (4*x1), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 * tmp16
tmp18 = tl.load(in_ptr0 + (1 + (4*x1)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tl.load(in_ptr0 + (2 + (4*x1)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tl.load(in_ptr0 + (3 + (4*x1)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp24 * tmp24
tmp26 = tmp23 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp13, tmp26, tmp27)
tmp29 = tl.where(tmp9, tmp12, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3k/c3kmh7lsmnzdzr342q44fn2y2heu6eptfn2e7nbfojxmv35w6kyt.py
# Topologically Sorted Source Nodes: [cdist, dis_2], Original ATen: [aten._euclidean_dist, aten.pow]
# Source node to ATen node mapping:
# cdist => clamp_min, sqrt
# dis_2 => pow_3
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mm, 0), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sqrt, 2), kwargs = {})
triton_poi_fused__euclidean_dist_pow_2 = async_compile.triton('triton_poi_fused__euclidean_dist_pow_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__euclidean_dist_pow_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__euclidean_dist_pow_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
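    # clamp tiny negative residue from the matmul to 0, sqrt yields cdist,
    # and squaring recovers dis_2 = cdist ** 2 (net effect: max(input, 0))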
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = libdevice.sqrt(tmp2)
tmp4 = tmp3 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 6), (6, 1), torch.float32)
# Topologically Sorted Source Nodes: [cdist], Original ATen: [aten._euclidean_dist]
stream0 = get_raw_stream(0)
triton_poi_fused__euclidean_dist_0.run(primals_1, buf0, 384, grid=grid(384), stream=stream0)
buf1 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
# Topologically Sorted Source Nodes: [cdist], Original ATen: [aten._euclidean_dist]
triton_poi_fused__euclidean_dist_1.run(primals_2, buf1, 24, grid=grid(24), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cdist], Original ATen: [aten._euclidean_dist]
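        # buf2[i, j] = -2 * x_i . y_j + ||x_i||^2 + ||y_j||^2, i.e. the squared
        # Euclidean distance, assembled from the two augmented matrices above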
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (6, 4), (1, 6), 0), out=buf2)
del buf0
del buf1
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cdist, dis_2], Original ATen: [aten._euclidean_dist, aten.pow]
triton_poi_fused__euclidean_dist_pow_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
return (buf3, primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
import torch.optim
import torch.utils.data
class Prototype(torch.nn.Module):
""""""
def __init__(self, prototype_num, latent_size) ->None:
super(Prototype, self).__init__()
self.latent_size = latent_size
self.prototype_num = prototype_num
self.prototypes = torch.nn.Parameter(data=torch.rand(self.
prototype_num, self.latent_size), requires_grad=True)
def forward(self, input):
""""""
x = input.view(-1, self.latent_size)
dis_2 = self.__calc_dist_2(x, self.prototypes)
return dis_2, self.prototypes
def __calc_dist_2(self, x, y):
""""""
dis_2 = torch.cdist(x, y, p=2) ** 2
return dis_2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'prototype_num': 4, 'latent_size': 4}]
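# Illustrative sketch (added for this dump, not part of the original module):
# the generated kernels realize torch.cdist(x, y, p=2) ** 2 through an
# augmented-matrix matmul, cdist(x, y) ** 2 == ||x||^2 - 2 * x @ y.T + ||y||^2.
# The helper name below is hypothetical.
def _check_euclidean_dist_decomposition():
    x = torch.rand(64, 4)
    y = torch.rand(4, 4)
    x_aug = torch.cat([-2.0 * x, (x * x).sum(-1, keepdim=True), torch.ones(
        64, 1)], dim=-1)
    y_aug = torch.cat([y, torch.ones(4, 1), (y * y).sum(-1, keepdim=True)],
        dim=-1)
    dis_2 = (x_aug @ y_aug.t()).clamp_min(0.0)
    assert torch.allclose(dis_2, torch.cdist(x, y, p=2) ** 2, atol=1e-05)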
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__euclidean_dist_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = -2.0
tmp7 = tmp5 * tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 5, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + 4 * x1, tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp14 * tmp14
tmp16 = tl.load(in_ptr0 + (1 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tl.load(in_ptr0 + (2 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tl.load(in_ptr0 + (3 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp13, tmp24, tmp25)
tmp27 = tmp0 >= tmp11
tl.full([1], 6, tl.int64)
tmp30 = 1.0
tmp31 = tl.full(tmp30.shape, 0.0, tmp30.dtype)
tmp32 = tl.where(tmp27, tmp30, tmp31)
tmp33 = tl.where(tmp13, tmp26, tmp32)
tmp34 = tl.where(tmp4, tmp9, tmp33)
tl.store(out_ptr0 + x2, tmp34, xmask)
@triton.jit
def triton_poi_fused__euclidean_dist_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 24
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = 1.0
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tmp0 >= tmp7
tl.full([1], 6, tl.int64)
tmp16 = tl.load(in_ptr0 + 4 * x1, tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp16 * tmp16
tmp18 = tl.load(in_ptr0 + (1 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tl.load(in_ptr0 + (2 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tl.load(in_ptr0 + (3 + 4 * x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp24 * tmp24
tmp26 = tmp23 + tmp25
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp13, tmp26, tmp27)
tmp29 = tl.where(tmp9, tmp12, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused__euclidean_dist_pow_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = libdevice.sqrt(tmp2)
tmp4 = tmp3 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 6), (6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__euclidean_dist_0[grid(384)](primals_1, buf0, 384,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
triton_poi_fused__euclidean_dist_1[grid(24)](primals_2, buf1, 24,
XBLOCK=32, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (6, 4), (1, 6), 0),
out=buf2)
del buf0
del buf1
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused__euclidean_dist_pow_2[grid(256)](buf2, buf3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
    return buf3, primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf2
class PrototypeNew(torch.nn.Module):
""""""
def __init__(self, prototype_num, latent_size) ->None:
super(PrototypeNew, self).__init__()
self.latent_size = latent_size
self.prototype_num = prototype_num
self.prototypes = torch.nn.Parameter(data=torch.rand(self.
prototype_num, self.latent_size), requires_grad=True)
def __calc_dist_2(self, x, y):
""""""
dis_2 = torch.cdist(x, y, p=2) ** 2
return dis_2
def forward(self, input_0):
primals_2 = self.prototypes
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
| wenqiangxie/Prototype-Net | Prototype | false | 4,528 | [
"MIT"
] | 0 | a5ddd9976b78828d87806f9451a5092de3ff5c69 | https://github.com/wenqiangxie/Prototype-Net/tree/a5ddd9976b78828d87806f9451a5092de3ff5c69 | import torch
import torch.nn
import torch.optim
import torch.utils.data
class Model(torch.nn.Module):
""""""
def __init__(self, prototype_num, latent_size) ->None:
super().__init__()
self.latent_size = latent_size
self.prototype_num = prototype_num
self.prototypes = torch.nn.Parameter(data=torch.rand(self.
prototype_num, self.latent_size), requires_grad=True)
def forward(self, input):
""""""
x = input.view(-1, self.latent_size)
dis_2 = self.__calc_dist_2(x, self.prototypes)
return dis_2, self.prototypes
def __calc_dist_2(self, x, y):
""""""
dis_2 = torch.cdist(x, y, p=2) ** 2
return dis_2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Model | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r6/cr6xs2yojfgnmaswzgpbcdfqvoa2jyjixuhbg5yxtudmuzzfoblr.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 2
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 2, 1, 1), (2, 1, 1, 1))
assert_size_stride(primals_2, (2, ), (1, ))
assert_size_stride(primals_3, (4, 2, 64, 64), (8192, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 32768, grid=grid(32768), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 2, 1, 1), (2, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 2, 64, 64), (8192, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch._C
import torch.serialization
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, x):
return self.conv(x)
def get_inputs():
return [torch.rand([4, 2, 64, 64])]
def get_init_inputs():
return [[], {}]
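# Illustrative sketch (added for this dump, not generated code): the compiled
# wrapper above runs the convolution as an extern kernel with bias=None and a
# separate fused Triton kernel adds the per-channel bias; the split matches
# calling the module directly. Helper name and atol are our choices.
def _check_bias_split(conv, x):
    no_bias = torch.nn.functional.conv2d(x, conv.weight, bias=None)
    fused = no_bias + conv.bias.view(1, -1, 1, 1)
    assert torch.allclose(conv(x), fused, atol=1e-06)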
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 2
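    # x1 selects the output channel (2 channels of 64 * 64 = 4096 elements),
    # so each element adds its per-channel bias loaded from in_ptr0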
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 2, 1, 1), (2, 1, 1, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 2, 64, 64), (8192, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 64, 64), (8192, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(32768)](buf1, primals_2, 32768,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class ModelNew(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| whu-pzhang/mmsegmentation | Model | false | 4,529 | [
"Apache-2.0"
] | 0 | 46326f63ce411c794d237e986dd3924590d0e75e | https://github.com/whu-pzhang/mmsegmentation/tree/46326f63ce411c794d237e986dd3924590d0e75e | import torch
import torch.nn as nn
import torch._C
import torch.serialization
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, x):
return self.conv(x)
def get_inputs():
return [torch.rand([4, 2, 64, 64])]
def get_init_inputs():
return []
|
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/rm/crmjcbrhesyjltwjwo2gy5ktnw7trv24ctlurkfme6ykhtfquq32.py
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_score => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rb/crbncgepp7pchewiviz2ecap4hkql77bxprjbw2ciuujmpu57s6c.py
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_score => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/23/c23eou4zjpkhw2g74fh6hkditmlm7g34v4yqt2e2v5xezsmce2cg.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_9, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_9, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bb/cbby6op7dmkjsypxm4o3urasth73g6q5oi4ddo6uk6dsuv6off2v.py
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_vec => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/we/cwe54p4p4jvwbdktkpj3wy2coheu6f3r3dgvi7ozm7xjfk4mgbwx.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_4,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ns/cnspfsjjmvserkfymbru7x5vm2xumtyor5javdiv74jr3avx67rq.py
# Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# layer_norm => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_16), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7u/c7uxwow3tztifyrr5oj6dotpbrh7qtup53xfydkt35y65ajtfwre.py
# Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# layer_norm => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_16), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_3, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_4), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 16, 4, grid=grid(16, 4), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf0, primals_3, buf2, 16, 4, grid=grid(16, 4), stream=stream0)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf0, primals_3, buf6, 16, 4, grid=grid(16, 4), stream=stream0)
del buf0
del primals_3
buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf9, buf10, buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, layer_norm], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf9, buf10, buf11, primals_6, primals_7, buf12, 64, grid=grid(64), stream=stream0)
del buf10
del buf11
del primals_7
return (buf12, primals_1, primals_6, buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from typing import Any
import torch.nn.functional as F
from torch import nn
def ifnone(a: 'Any', b: 'Any') ->Any:
"""`a` if `a` is not None, otherwise `b`."""
return b if a is None else a
class MultiHeadAttention(nn.Module):
"""MutiHeadAttention."""
def __init__(self, n_heads: 'int', d_model: 'int', d_head: 'int'=None,
resid_p: 'float'=0.0, attn_p: 'float'=0.0, bias: 'bool'=True, scale:
'bool'=True):
super().__init__()
d_head = ifnone(d_head, d_model // n_heads)
self.n_heads, self.d_head, self.scale = n_heads, d_head, scale
self.attention = nn.Linear(d_model, 3 * n_heads * d_head, bias=bias)
self.out = nn.Linear(n_heads * d_head, d_model, bias=bias)
self.drop_att, self.drop_res = nn.Dropout(attn_p), nn.Dropout(resid_p)
self.ln = nn.LayerNorm(d_model)
def forward(self, x: 'Tensor', mask: 'Tensor'=None, **kwargs):
return self.ln(x + self.drop_res(self.out(self._apply_attention(x,
mask=mask, **kwargs))))
def _apply_attention(self, x: 'Tensor', mask: 'Tensor'=None):
bs, x_len = x.size(0), x.size(1)
wq, wk, wv = torch.chunk(self.attention(x), 3, dim=-1)
wq, wk, wv = map(lambda x: x.view(bs, x_len, self.n_heads, self.
d_head), (wq, wk, wv))
wq, wk, wv = wq.permute(0, 2, 1, 3), wk.permute(0, 2, 3, 1
), wv.permute(0, 2, 1, 3)
attn_score = torch.matmul(wq, wk)
if self.scale:
attn_score.div_(self.d_head ** 0.5)
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')
).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=-1))
attn_vec = torch.matmul(attn_prob, wv)
        return attn_vec.permute(0, 2, 1, 3).contiguous().view(bs, x_len, -1)
def _attention_einsum(self, x, mask=None):
bs, x_len = x.size(0), x.size(1)
wq, wk, wv = torch.chunk(self.attention(x), 3, dim=-1)
wq, wk, wv = map(lambda x: x.view(bs, x_len, self.n_heads, self.
d_head), (wq, wk, wv))
attn_score = torch.einsum('bind,bjnd->bijn', (wq, wk))
if self.scale:
attn_score.mul_(1 / self.d_head ** 0.5)
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')
).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=2))
attn_vec = torch.einsum('bijn,bjnd->bind', (attn_prob, wv))
return attn_vec.contiguous().view(bs, x_len, -1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_heads': 4, 'd_model': 4}]
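# Illustrative sketch (added for this dump, not part of the original module):
# the compiled version computes softmax in two passes for numerical
# stability -- exp(x - rowmax), then division by the row sum. The helper
# name is hypothetical.
def _two_pass_softmax(scores: 'Tensor') ->Tensor:
    shifted = (scores - scores.max(dim=-1, keepdim=True).values).exp()
    out = shifted / shifted.sum(dim=-1, keepdim=True)
    assert torch.allclose(out, F.softmax(scores, dim=-1), atol=1e-06)
    return out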
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import Tensor
from typing import Any
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
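    # x1 indexes the softmax row; the four evict_last loads below gather the
    # whole row so its maximum can be subtracted before exponentiation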
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tl_math.exp(tmp14)
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
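    # second softmax pass: divide each exp(x - rowmax) by its row's sum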
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
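# Editor's note (illustrative sketch): kernels ..._6 and ..._7 together fuse the
# residual add with LayerNorm over the last dimension (size 4). Kernel 6 emits the
# per-row mean and biased variance of (x + attn_out); kernel 7 normalises with
# eps = 1e-05 and applies the affine weight/bias. In plain PyTorch:
def _residual_layernorm_reference(x, y, weight, bias, eps=1e-05):
    s = x + y
    mean = s.mean(dim=-1, keepdim=True)                 # kernel ..._6, out_ptr0
    var = s.var(dim=-1, unbiased=False, keepdim=True)   # kernel ..._6, out_ptr1
    return (s - mean) * torch.rsqrt(var + eps) * weight + bias  # kernel ..._7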
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf0, primals_3, buf2, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf0, primals_3, buf6, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf0
del primals_3
buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_1, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_1, buf9,
buf10, buf11, primals_6, primals_7, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
del buf11
del primals_7
return buf12, primals_1, primals_6, buf5, reinterpret_tensor(buf8, (16,
4), (4, 1), 0), buf9, primals_4, reinterpret_tensor(buf6, (16, 1, 4
), (4, 1, 1), 0), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0)
def ifnone(a: 'Any', b: 'Any') ->Any:
"""`a` if `a` is not None, otherwise `b`."""
return b if a is None else a
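# Editor's illustration: ifnone is a tiny default-fallback helper, e.g.
#   ifnone(None, 8) -> 8
#   ifnone(3, 8)    -> 3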
class MultiHeadAttentionNew(nn.Module):
"""MutiHeadAttention."""
def __init__(self, n_heads: 'int', d_model: 'int', d_head: 'int'=None,
resid_p: 'float'=0.0, attn_p: 'float'=0.0, bias: 'bool'=True, scale:
'bool'=True):
super().__init__()
d_head = ifnone(d_head, d_model // n_heads)
self.n_heads, self.d_head, self.scale = n_heads, d_head, scale
self.attention = nn.Linear(d_model, 3 * n_heads * d_head, bias=bias)
self.out = nn.Linear(n_heads * d_head, d_model, bias=bias)
self.drop_att, self.drop_res = nn.Dropout(attn_p), nn.Dropout(resid_p)
self.ln = nn.LayerNorm(d_model)
def _apply_attention(self, x: 'Tensor', mask: 'Tensor'=None):
bs, x_len = x.size(0), x.size(1)
wq, wk, wv = torch.chunk(self.attention(x), 3, dim=-1)
wq, wk, wv = map(lambda x: x.view(bs, x_len, self.n_heads, self.
d_head), (wq, wk, wv))
wq, wk, wv = wq.permute(0, 2, 1, 3), wk.permute(0, 2, 3, 1
), wv.permute(0, 2, 1, 3)
attn_score = torch.matmul(wq, wk)
if self.scale:
attn_score.div_(self.d_head ** 0.5)
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')
).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=-1))
attn_vec = torch.matmul(attn_prob, wv)
        return attn_vec.permute(0, 2, 1, 3).contiguous().view(bs, x_len, -1)
def _attention_einsum(self, x, mask=None):
bs, x_len = x.size(0), x.size(1)
wq, wk, wv = torch.chunk(self.attention(x), 3, dim=-1)
wq, wk, wv = map(lambda x: x.view(bs, x_len, self.n_heads, self.
d_head), (wq, wk, wv))
attn_score = torch.einsum('bind,bjnd->bijn', (wq, wk))
if self.scale:
attn_score.mul_(1 / self.d_head ** 0.5)
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')
).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=2))
attn_vec = torch.einsum('bijn,bjnd->bind', (attn_prob, wv))
return attn_vec.contiguous().view(bs, x_len, -1)
def forward(self, input_0):
primals_2 = self.attention.weight
primals_3 = self.attention.bias
primals_4 = self.out.weight
primals_5 = self.out.bias
primals_6 = self.ln.weight
primals_7 = self.ln.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| wegroupwolves/fastai | MultiHeadAttention | false | 4,530 | [
"Apache-2.0"
] | 0 | df40df403e05e132411f0f7abc7ec33c86e58bb9 | https://github.com/wegroupwolves/fastai/tree/df40df403e05e132411f0f7abc7ec33c86e58bb9 | import torch
from torch import Tensor
from typing import Any
import torch.nn.functional as F
from torch import nn
def ifnone(a: 'Any', b: 'Any') ->Any:
"""`a` if `a` is not None, otherwise `b`."""
return b if a is None else a
class Model(nn.Module):
"""MutiHeadAttention."""
def __init__(self, n_heads: 'int', d_model: 'int', d_head: 'int'=None,
resid_p: 'float'=0.0, attn_p: 'float'=0.0, bias: 'bool'=True, scale:
'bool'=True):
super().__init__()
d_head = ifnone(d_head, d_model // n_heads)
self.n_heads, self.d_head, self.scale = n_heads, d_head, scale
self.attention = nn.Linear(d_model, 3 * n_heads * d_head, bias=bias)
self.out = nn.Linear(n_heads * d_head, d_model, bias=bias)
self.drop_att, self.drop_res = nn.Dropout(attn_p), nn.Dropout(resid_p)
self.ln = nn.LayerNorm(d_model)
def forward(self, x: 'Tensor', mask: 'Tensor'=None, **kwargs):
return self.ln(x + self.drop_res(self.out(self._apply_attention(x,
mask=mask, **kwargs))))
def _apply_attention(self, x: 'Tensor', mask: 'Tensor'=None):
bs, x_len = x.size(0), x.size(1)
wq, wk, wv = torch.chunk(self.attention(x), 3, dim=-1)
wq, wk, wv = map(lambda x: x.view(bs, x_len, self.n_heads, self.
d_head), (wq, wk, wv))
wq, wk, wv = wq.permute(0, 2, 1, 3), wk.permute(0, 2, 3, 1
), wv.permute(0, 2, 1, 3)
attn_score = torch.matmul(wq, wk)
if self.scale:
attn_score.div_(self.d_head ** 0.5)
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')
).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=-1))
attn_vec = torch.matmul(attn_prob, wv)
        return attn_vec.permute(0, 2, 1, 3).contiguous().view(bs, x_len, -1)
def _attention_einsum(self, x, mask=None):
bs, x_len = x.size(0), x.size(1)
wq, wk, wv = torch.chunk(self.attention(x), 3, dim=-1)
wq, wk, wv = map(lambda x: x.view(bs, x_len, self.n_heads, self.
d_head), (wq, wk, wv))
attn_score = torch.einsum('bind,bjnd->bijn', (wq, wk))
if self.scale:
attn_score.mul_(1 / self.d_head ** 0.5)
if mask is not None:
attn_score = attn_score.float().masked_fill(mask, -float('inf')
).type_as(attn_score)
attn_prob = self.drop_att(F.softmax(attn_score, dim=2))
attn_vec = torch.einsum('bijn,bjnd->bind', (attn_prob, wv))
return attn_vec.contiguous().view(bs, x_len, -1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
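# Editor's sketch (hypothetical smoke test built only from the helpers above):
# the block is residual + LayerNorm around the attention output, so the module
# preserves the (batch, seq_len, d_model) shape of its input.
def _shape_check():
    m = Model(*get_init_inputs())
    x, = get_inputs()
    assert m(x).shape == x.shape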
|
GDN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/sd/csdwvvi3wbrioicxfahsmuujha2ghzed5cce2spz33p6va2ab6i4.py
# Topologically Sorted Source Nodes: [gamma, pow_2, gamma_1], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub]
# Source node to ATen node mapping:
# gamma => full_default_1, maximum_1
# gamma_1 => sub_1
# pow_2 => pow_2
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 3.814697265625e-06), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_3, %full_default_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum_1, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, 1.4551915228366852e-11), kwargs = {})
triton_poi_fused_maximum_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_maximum_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_maximum_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_maximum_mul_pow_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.814697265625e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
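# Editor's note on the magic constants baked into this kernel (derivation sketch):
# they follow from GDN's reparameterisation with reparam_offset = 2 ** -18:
#   gamma_bound = 2 ** -18 = 3.814697265625e-06
#   pedestal = (2 ** -18) ** 2 = 2 ** -36 = 1.4551915228366852e-11
#   beta_bound = (beta_min + pedestal) ** 0.5 = 0.0010000072652474046 (beta_min = 1e-06)
# so the fused op is LowerBound(gamma, gamma_bound) ** 2 - pedestal, pointwise.
assert 2 ** -18 == 3.814697265625e-06 and 2 ** -36 == 1.4551915228366852e-11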
# kernel path: runs/run_shard_7/inductor_cache/2y/c2y2depfxa2z6x54ydtq4sztgglwdvphffk5xy2ad6meezomjm6h.py
# Topologically Sorted Source Nodes: [pow_3], Original ATen: [aten.pow]
# Source node to ATen node mapping:
# pow_3 => pow_3
# Graph fragment:
# %pow_3 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
triton_poi_fused_pow_1 = async_compile.triton('triton_poi_fused_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rf/crfppqbslgh44juuhqfpfib7bokrzsfockkr2b3gvhzhdu4wbgaj.py
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution]
# Source node to ATen node mapping:
# beta => full_default, maximum
# beta_1 => sub
# norm_ => convolution
# pow_1 => pow_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 0.0010000072652474046), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_2, %full_default), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, 1.4551915228366852e-11), kwargs = {})
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%pow_3, %view, %sub, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_maximum_mul_pow_sub_2 = async_compile.triton('triton_poi_fused_convolution_maximum_mul_pow_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_maximum_mul_pow_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_maximum_mul_pow_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0010000072652474046
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tc/ctce67tjwgeiaxzrcsilnohm7paxqb2pdfdqcrlrvnl3xpwm4s4c.py
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_, norm__1, outputs], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# beta => full_default, maximum
# beta_1 => sub
# norm_ => convolution
# norm__1 => sqrt
# outputs => div
# pow_1 => pow_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 0.0010000072652474046), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_2, %full_default), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, 1.4551915228366852e-11), kwargs = {})
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%pow_3, %view, %sub, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%convolution,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %sqrt), kwargs = {})
triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3 = async_compile.triton('triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = libdevice.sqrt(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gamma, pow_2, gamma_1], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_maximum_mul_pow_sub_0.run(primals_3, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_3], Original ATen: [aten.pow]
triton_poi_fused_pow_1.run(primals_1, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution]
triton_poi_fused_convolution_maximum_mul_pow_sub_2.run(primals_2, buf2, 4, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution]
buf3 = extern_kernels.convolution(buf1, reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_, norm__1, outputs], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution, aten.sqrt, aten.div]
triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3.run(buf4, buf2, primals_1, buf5, 256, grid=grid(256), stream=stream0)
del buf2
return (buf5, primals_1, primals_2, primals_3, reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf1, buf4, )
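# Editor's sketch of what call() computes, in plain PyTorch (assumes the fixed
# (4, 4, 4, 4) shapes of this trace; not a general reimplementation):
def _gdn_reference(x, beta, gamma, pedestal=2 ** -36):
    beta_ = torch.clamp(beta, min=0.0010000072652474046) ** 2 - pedestal   # kernel _2
    gamma_ = torch.clamp(gamma, min=2 ** -18) ** 2 - pedestal              # kernel _0
    norm_ = torch.nn.functional.conv2d(x ** 2, gamma_.view(4, 4, 1, 1), beta_)
    return x / torch.sqrt(norm_)                                           # kernel _3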
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import torch.nn as nn
import torch.utils.data
class LowerBound(Function):
@staticmethod
def forward(ctx, inputs, bound):
b = torch.ones_like(inputs) * bound
ctx.save_for_backward(inputs, b)
return torch.max(inputs, b)
@staticmethod
def backward(ctx, grad_output):
inputs, b = ctx.saved_tensors
pass_through_1 = inputs >= b
pass_through_2 = grad_output < 0
pass_through = pass_through_1 | pass_through_2
return pass_through.type(grad_output.dtype) * grad_output, None
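# Editor's illustration (sketch): LowerBound matches torch.max(inputs, bound) in
# the forward pass, but its backward also passes gradient through entries that sit
# below the bound whenever the gradient would push them upward (grad_output < 0),
# so clamped parameters remain trainable.
def _lowerbound_demo():
    x = torch.tensor([0.5, 2.0], requires_grad=True)
    y = LowerBound.apply(x, 1.0)            # forward: tensor([1.0, 2.0])
    y.backward(torch.tensor([-1.0, -1.0]))  # negative grads flow to both entries
    return x.grad                           # tensor([-1., -1.])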
class GDN(nn.Module):
"""Generalized divisive normalization layer.
    y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
"""
def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1,
reparam_offset=2 ** -18):
super(GDN, self).__init__()
self.inverse = inverse
self.beta_min = beta_min
self.gamma_init = gamma_init
self.reparam_offset = reparam_offset
self.build(ch)
def build(self, ch):
self.pedestal = self.reparam_offset ** 2
self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5
self.gamma_bound = self.reparam_offset
beta = torch.sqrt(torch.ones(ch) + self.pedestal)
self.beta = nn.Parameter(beta)
eye = torch.eye(ch)
g = self.gamma_init * eye
g = g + self.pedestal
gamma = torch.sqrt(g)
self.gamma = nn.Parameter(gamma)
def forward(self, inputs):
unfold = False
if inputs.dim() == 5:
unfold = True
bs, ch, d, w, h = inputs.size()
inputs = inputs.view(bs, ch, d * w, h)
_, ch, _, _ = inputs.size()
beta = LowerBound.apply(self.beta, self.beta_bound)
beta = beta ** 2 - self.pedestal
gamma = LowerBound.apply(self.gamma, self.gamma_bound)
gamma = gamma ** 2 - self.pedestal
gamma = gamma.view(ch, ch, 1, 1)
norm_ = nn.functional.conv2d(inputs ** 2, gamma, beta)
norm_ = torch.sqrt(norm_)
if self.inverse:
outputs = inputs * norm_
else:
outputs = inputs / norm_
if unfold:
outputs = outputs.view(bs, ch, d, w, h)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ch': 4}]
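# Editor's sketch (hypothetical smoke test using only the helpers above): at the
# default init, beta is near 1 and gamma near sqrt(0.1) on the diagonal, so the
# layer roughly divides each channel by sqrt(1 + 0.1 * x ** 2).
def _gdn_smoke_test():
    m = GDN(**get_init_inputs()[1])
    x, = get_inputs()
    out = m(x)
    assert out.shape == x.shape and torch.isfinite(out).all()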
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_maximum_mul_pow_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.814697265625e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_convolution_maximum_mul_pow_sub_2(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0010000072652474046
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = libdevice.sqrt(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_maximum_mul_pow_sub_0[grid(16)](primals_3, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_pow_1[grid(256)](primals_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_convolution_maximum_mul_pow_sub_2[grid(4)](primals_2,
buf2, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = extern_kernels.convolution(buf1, reinterpret_tensor(buf0, (4,
4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3[grid(256)](
buf4, buf2, primals_1, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf2
return buf5, primals_1, primals_2, primals_3, reinterpret_tensor(buf0,
(4, 4, 1, 1), (4, 1, 1, 1), 0), buf1, buf4
class LowerBound(Function):
@staticmethod
def forward(ctx, inputs, bound):
b = torch.ones_like(inputs) * bound
ctx.save_for_backward(inputs, b)
return torch.max(inputs, b)
@staticmethod
def backward(ctx, grad_output):
inputs, b = ctx.saved_tensors
pass_through_1 = inputs >= b
pass_through_2 = grad_output < 0
pass_through = pass_through_1 | pass_through_2
return pass_through.type(grad_output.dtype) * grad_output, None
class GDNNew(nn.Module):
"""Generalized divisive normalization layer.
    y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
"""
def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1,
reparam_offset=2 ** -18):
super(GDNNew, self).__init__()
self.inverse = inverse
self.beta_min = beta_min
self.gamma_init = gamma_init
self.reparam_offset = reparam_offset
self.build(ch)
def build(self, ch):
self.pedestal = self.reparam_offset ** 2
self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5
self.gamma_bound = self.reparam_offset
beta = torch.sqrt(torch.ones(ch) + self.pedestal)
self.beta = nn.Parameter(beta)
eye = torch.eye(ch)
g = self.gamma_init * eye
g = g + self.pedestal
gamma = torch.sqrt(g)
self.gamma = nn.Parameter(gamma)
def forward(self, input_0):
primals_2 = self.beta
primals_3 = self.gamma
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| wemozj/Image-Compression-based-GMM-and-Attention-Module | GDN | false | 4,531 | [
"Apache-2.0"
] | 0 | 93f804dbcea8ffc1621456f3d104d0342c75373b | https://github.com/wemozj/Image-Compression-based-GMM-and-Attention-Module/tree/93f804dbcea8ffc1621456f3d104d0342c75373b | from torch.autograd import Function
import torch
import torch.nn as nn
import torch.utils.data
class LowerBound(Function):
@staticmethod
def forward(ctx, inputs, bound):
b = torch.ones_like(inputs) * bound
ctx.save_for_backward(inputs, b)
return torch.max(inputs, b)
@staticmethod
def backward(ctx, grad_output):
inputs, b = ctx.saved_tensors
pass_through_1 = inputs >= b
pass_through_2 = grad_output < 0
pass_through = pass_through_1 | pass_through_2
return pass_through.type(grad_output.dtype) * grad_output, None
class Model(nn.Module):
"""Generalized divisive normalization layer.
    y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
"""
def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1,
reparam_offset=2 ** -18):
super().__init__()
self.inverse = inverse
self.beta_min = beta_min
self.gamma_init = gamma_init
self.reparam_offset = reparam_offset
self.build(ch)
def build(self, ch):
self.pedestal = self.reparam_offset ** 2
self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5
self.gamma_bound = self.reparam_offset
beta = torch.sqrt(torch.ones(ch) + self.pedestal)
self.beta = nn.Parameter(beta)
eye = torch.eye(ch)
g = self.gamma_init * eye
g = g + self.pedestal
gamma = torch.sqrt(g)
self.gamma = nn.Parameter(gamma)
def forward(self, inputs):
unfold = False
if inputs.dim() == 5:
unfold = True
bs, ch, d, w, h = inputs.size()
inputs = inputs.view(bs, ch, d * w, h)
_, ch, _, _ = inputs.size()
beta = LowerBound.apply(self.beta, self.beta_bound)
beta = beta ** 2 - self.pedestal
gamma = LowerBound.apply(self.gamma, self.gamma_bound)
gamma = gamma ** 2 - self.pedestal
gamma = gamma.view(ch, ch, 1, 1)
norm_ = nn.functional.conv2d(inputs ** 2, gamma, beta)
norm_ = torch.sqrt(norm_)
if self.inverse:
outputs = inputs * norm_
else:
outputs = inputs / norm_
if unfold:
outputs = outputs.view(bs, ch, d, w, h)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
UFOAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/oh/cohi5innigfnlg3a4vz3tzzxnyrdwyi342o5yrghstooasfjvhet.py
# Topologically Sorted Source Nodes: [kv], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# kv => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/zo/czomnz74isoxx6up5u2hmx722cdztmgay2wiq7c54b4ihien6mpt.py
# Topologically Sorted Source Nodes: [kv], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# kv => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/t2/ct2mzc2szheskf3r6msdggseeexrinz2uczzpn6k3pwvv6vc2vio.py
# Topologically Sorted Source Nodes: [norm_tensor, mul, kv_norm], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.div]
# Source node to ATen node mapping:
# kv_norm => div
# mul => mul
# norm_tensor => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_11, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, %primals_10), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %pow_2), kwargs = {})
triton_poi_fused_div_linalg_vector_norm_mul_2 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = (xindex // 16) % 4
x5 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4*x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x5)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x5)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x5)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp2 / tmp14
tl.store(out_ptr0 + (x4), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/h6/ch65kkrcljiqxk6uhmtskijz2ynwfqf7gggfxjtnlm2uhjjju6sb.py
# Topologically Sorted Source Nodes: [norm_tensor_1, mul_1, q_norm, matmul_1], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.div, aten.clone]
# Source node to ATen node mapping:
# matmul_1 => clone_2
# mul_1 => mul_1
# norm_tensor_1 => pow_3, pow_4, sum_2
# q_norm => div_1
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%permute_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_1, %primals_10), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %pow_4), kwargs = {})
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_div_linalg_vector_norm_mul_3 = async_compile.triton('triton_poi_fused_clone_div_linalg_vector_norm_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_div_linalg_vector_norm_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_div_linalg_vector_norm_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 4) % 4
x5 = (xindex // 4)
x0 = xindex % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4*x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x5)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x5)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x5)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp2 / tmp14
tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6b/c6busvilz5nn36jjet3bmw7cqddirh4sgalamjr3fsrp3sbsacfi.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, ), (1, ))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16, ), (1, ))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (16, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [kv], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf1, primals_6, buf3, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_6
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [kv], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf2, primals_8, buf4, 256, grid=grid(256), stream=stream0)
del primals_8
buf5 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [kv], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_tensor, mul, kv_norm], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.div]
triton_poi_fused_div_linalg_vector_norm_mul_2.run(buf5, primals_10, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_tensor_1, mul_1, q_norm, matmul_1], Original ATen: [aten.linalg_vector_norm, aten.mul, aten.div, aten.clone]
triton_poi_fused_clone_div_linalg_vector_norm_mul_3.run(buf0, primals_10, buf7, 256, grid=grid(256), stream=stream0)
buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf8, buf9, 256, grid=grid(256), stream=stream0)
del buf8
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, reinterpret_tensor(buf9, (16, 16), (16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf10)
del primals_12
return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), primals_10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf5, buf6, reinterpret_tensor(buf9, (16, 16), (16, 1), 0), primals_11, reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import init
def XNorm(x, gamma):
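    # UFO attention's XNorm: scale x by the learnable per-head gamma and divide
    # by the L2 norm of x along its last dim (kept as a singleton dim by the
    # keepdim=True flag of torch.norm so it broadcasts).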
norm_tensor = torch.norm(x, 2, -1, True)
return x * gamma / norm_tensor
class UFOAttention(nn.Module):
"""
Scaled dot-product attention
"""
def __init__(self, d_model, d_k, d_v, h, dropout=0.1):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
"""
super(UFOAttention, self).__init__()
self.fc_q = nn.Linear(d_model, h * d_k)
self.fc_k = nn.Linear(d_model, h * d_k)
self.fc_v = nn.Linear(d_model, h * d_v)
self.fc_o = nn.Linear(h * d_v, d_model)
self.dropout = nn.Dropout(dropout)
self.gamma = nn.Parameter(torch.randn((1, h, 1, 1)))
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.h = h
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, queries, keys, values):
b_s, nq = queries.shape[:2]
nk = keys.shape[1]
q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2,
1, 3)
k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1)
v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2,
1, 3)
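        # Linear-attention-style trick: compute kv = K V first, a (b_s, h,
        # d_k, d_v) summary per head, so the cost grows linearly with sequence
        # length instead of materializing an (nq x nk) attention map.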
kv = torch.matmul(k, v)
kv_norm = XNorm(kv, self.gamma)
q_norm = XNorm(q, self.gamma)
out = torch.matmul(q_norm, kv_norm).permute(0, 2, 1, 3).contiguous(
).view(b_s, nq, self.h * self.d_v)
out = self.fc_o(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'d_model': 4, 'd_k': 4, 'd_v': 4, 'h': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_mul_2(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = xindex // 16 % 4
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 4 * x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x5), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x5), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp2 / tmp14
tl.store(out_ptr0 + x4, tmp15, xmask)
@triton.jit
def triton_poi_fused_clone_div_linalg_vector_norm_mul_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4 % 4
x5 = xindex // 4
x0 = xindex % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 4 * x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x5), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x5), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x5), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = tmp2 / tmp14
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp15, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (16, 4), (4, 1))
assert_size_stride(primals_6, (16,), (1,))
assert_size_stride(primals_7, (16, 4), (4, 1))
assert_size_stride(primals_8, (16,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (4, 16), (16, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
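        # buf0..buf2 hold the fc_q/fc_k/fc_v projections on flattened (b_s*n,
        # d_model) views; fc_q's bias is fused into the addmm, while the fc_k
        # and fc_v biases are added later inside the clone kernels.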
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 16), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](buf1, primals_6, buf3, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_clone_1[grid(256)](buf2, primals_8, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf5 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_linalg_vector_norm_mul_2[grid(256)](buf5,
primals_10, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_div_linalg_vector_norm_mul_3[grid(256)](buf0,
primals_10, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(256)](buf8, buf9, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf8
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_12, reinterpret_tensor(buf9, (16, 16),
(16, 1), 0), reinterpret_tensor(primals_11, (16, 4), (1, 16), 0
), alpha=1, beta=1, out=buf10)
del primals_12
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), primals_10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf5, buf6, reinterpret_tensor(buf9, (16, 16), (16, 1), 0
), primals_11, reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0)
def XNorm(x, gamma):
norm_tensor = torch.norm(x, 2, -1, True)
return x * gamma / norm_tensor
class UFOAttentionNew(nn.Module):
"""
Scaled dot-product attention
"""
def __init__(self, d_model, d_k, d_v, h, dropout=0.1):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
"""
super(UFOAttentionNew, self).__init__()
self.fc_q = nn.Linear(d_model, h * d_k)
self.fc_k = nn.Linear(d_model, h * d_k)
self.fc_v = nn.Linear(d_model, h * d_v)
self.fc_o = nn.Linear(h * d_v, d_model)
self.dropout = nn.Dropout(dropout)
self.gamma = nn.Parameter(torch.randn((1, h, 1, 1)))
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.h = h
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, input_0, input_1, input_2):
primals_10 = self.gamma
primals_3 = self.fc_q.weight
primals_4 = self.fc_q.bias
primals_5 = self.fc_k.weight
primals_6 = self.fc_k.bias
primals_7 = self.fc_v.weight
primals_8 = self.fc_v.bias
primals_11 = self.fc_o.weight
primals_12 = self.fc_o.bias
primals_1 = input_0
primals_2 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| weihaoxie/External-Attention-pytorch | UFOAttention | false | 4,532 | [
"MIT"
] | 0 | 9bec70f4ed8dd858c815e9bad240ab2f95a91a9f | https://github.com/weihaoxie/External-Attention-pytorch/tree/9bec70f4ed8dd858c815e9bad240ab2f95a91a9f | import torch
from torch import nn
from torch.nn import init
def XNorm(x, gamma):
norm_tensor = torch.norm(x, 2, -1, True)
return x * gamma / norm_tensor
class Model(nn.Module):
"""
Scaled dot-product attention
"""
def __init__(self, d_model, d_k, d_v, h, dropout=0.1):
"""
:param d_model: Output dimensionality of the model
:param d_k: Dimensionality of queries and keys
:param d_v: Dimensionality of values
:param h: Number of heads
"""
super().__init__()
self.fc_q = nn.Linear(d_model, h * d_k)
self.fc_k = nn.Linear(d_model, h * d_k)
self.fc_v = nn.Linear(d_model, h * d_v)
self.fc_o = nn.Linear(h * d_v, d_model)
self.dropout = nn.Dropout(dropout)
self.gamma = nn.Parameter(torch.randn((1, h, 1, 1)))
self.d_model = d_model
self.d_k = d_k
self.d_v = d_v
self.h = h
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, queries, keys, values):
b_s, nq = queries.shape[:2]
nk = keys.shape[1]
q = self.fc_q(queries).view(b_s, nq, self.h, self.d_k).permute(0, 2,
1, 3)
k = self.fc_k(keys).view(b_s, nk, self.h, self.d_k).permute(0, 2, 3, 1)
v = self.fc_v(values).view(b_s, nk, self.h, self.d_v).permute(0, 2,
1, 3)
kv = torch.matmul(k, v)
kv_norm = XNorm(kv, self.gamma)
q_norm = XNorm(q, self.gamma)
out = torch.matmul(q_norm, kv_norm).permute(0, 2, 1, 3).contiguous(
).view(b_s, nq, self.h * self.d_v)
out = self.fc_o(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [4, 4, 4, 4]
|
NaiveGate | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/yo/cyo66tzofrbi3pxzjycdnkjiyuupd7mqsess6fk2iymvwuhtbljk.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [gate], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
# Topologically Sorted Source Nodes: [topk], Original ATen: [aten.topk]
buf1 = torch.ops.aten.topk.default(reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0), 2, -1, True, False)
del buf0
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf2, buf4, 128, grid=grid(128), stream=stream0)
del buf2
return (reinterpret_tensor(buf3, (128, ), (1, ), 0), reinterpret_tensor(buf4, (64, 1, 2), (2, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class NaiveGate(nn.Module):
"""
A naive gate implementation that defines the standard behavior of the gate
    which determines which experts the tokens are routed to.
    Both the indices and the score, or confidence, are output to the parent
module.
The load-balance strategies are also designed to be implemented within the
`Gate` module.
"""
def __init__(self, d_model, num_expert, world_size, top_k=2):
super().__init__()
self.gate = nn.Linear(d_model, num_expert * world_size)
self.top_k = top_k
def forward(self, inp):
"""
The naive implementation simply calculates the top-k of a linear layer's
output.
"""
gate = self.gate(inp)
gate_top_k_val, gate_top_k_idx = torch.topk(gate, k=self.top_k, dim
=-1, largest=True, sorted=False)
gate_top_k_val = gate_top_k_val.view(-1, self.top_k)
gate_score = F.softmax(gate_top_k_val, dim=-1).unsqueeze(1)
gate_top_k_idx = gate_top_k_idx.view(-1)
return gate_top_k_idx, gate_score
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'num_expert': 4, 'world_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
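# Fused softmax over each top-2 pair of gate logits: every element loads both
# values of its pair (indices 2*x1 and 2*x1 + 1), subtracts the pair max for
# numerical stability, exponentiates, and divides by the pair's sum.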
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = torch.ops.aten.topk.default(reinterpret_tensor(buf0, (4, 4,
4, 16), (256, 64, 16, 1), 0), 2, -1, True, False)
del buf0
buf2 = buf1[0]
buf3 = buf1[1]
del buf1
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(128)](buf2, buf4, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
return reinterpret_tensor(buf3, (128,), (1,), 0), reinterpret_tensor(buf4,
(64, 1, 2), (2, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (
4, 1), 0), buf3, buf4
class NaiveGateNew(nn.Module):
"""
A naive gate implementation that defines the standard behavior of the gate
    which determines which experts the tokens are routed to.
    Both the indices and the score, or confidence, are output to the parent
module.
The load-balance strategies are also designed to be implemented within the
`Gate` module.
"""
def __init__(self, d_model, num_expert, world_size, top_k=2):
super().__init__()
self.gate = nn.Linear(d_model, num_expert * world_size)
self.top_k = top_k
def forward(self, input_0):
primals_1 = self.gate.weight
primals_2 = self.gate.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
| whn09/fastmoe | NaiveGate | false | 4,533 | [
"Apache-2.0"
] | 0 | d0ffaffc6431abcd3ea6d0287dbf09f8cd727a0a | https://github.com/whn09/fastmoe/tree/d0ffaffc6431abcd3ea6d0287dbf09f8cd727a0a | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
A naive gate implementation that defines the standard behavior of the gate
    which determines which experts the tokens are routed to.
    Both the indices and the score, or confidence, are output to the parent
module.
The load-balance strategies are also designed to be implemented within the
`Gate` module.
"""
def __init__(self, d_model, num_expert, world_size, top_k=2):
super().__init__()
self.gate = nn.Linear(d_model, num_expert * world_size)
self.top_k = top_k
def forward(self, inp):
"""
The naive implementation simply calculates the top-k of a linear layer's
output.
"""
gate = self.gate(inp)
gate_top_k_val, gate_top_k_idx = torch.topk(gate, k=self.top_k, dim
=-1, largest=True, sorted=False)
gate_top_k_val = gate_top_k_val.view(-1, self.top_k)
gate_score = F.softmax(gate_top_k_val, dim=-1).unsqueeze(1)
gate_top_k_idx = gate_top_k_idx.view(-1)
return gate_top_k_idx, gate_score
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
ActorNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bg/cbg32drchyezvbfwshguvyopixmzwi2llws7xkhvpdruis76tr2t.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# out_2 => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/oo/coo5rivaroinv27r7to5gs4jb7ce7itar6epfsastoa2ig6tj65k.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# out_2 => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ActorNetwork(nn.Module):
def __init__(self, input_size, hidden_size, action_size):
super(ActorNetwork, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, action_size)
def forward(self, x):
out = F.relu(self.fc1(x))
out = F.relu(self.fc2(out))
        out = F.log_softmax(self.fc3(out), dim=1)  # explicit dim (matches the compiled dim-1 reduction) avoids the implicit-dim deprecation warning
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'action_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
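# Fused bias-add + ReLU that also stores the (out <= 0) mask consumed by the
# backward threshold kernel.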
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
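# Second pass: with the row max already subtracted, take log(sum(exp(.))) over
# the same four entries and subtract it, completing
# log_softmax(x) = (x - max) - log(sum(exp(x - max))).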
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__log_softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8
class ActorNetworkNew(nn.Module):
def __init__(self, input_size, hidden_size, action_size):
super(ActorNetworkNew, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, action_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| whongyu/MA3C | ActorNetwork | false | 4,534 | [
"MIT"
] | 0 | d3b38cf42a909c0938624ba853119804efaf47eb | https://github.com/whongyu/MA3C/tree/d3b38cf42a909c0938624ba853119804efaf47eb | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input_size, hidden_size, action_size):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, action_size)
def forward(self, x):
out = F.relu(self.fc1(x))
out = F.relu(self.fc2(out))
out = F.log_softmax(self.fc3(out))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
ComplexBatchNormalize | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zj/czjd4gk7xzwy4hojfvo35lgyxe4nkfjguyrajnu2f2jvryoevie7.py
# Topologically Sorted Source Nodes: [phase, setitem], Original ATen: [aten.atan2, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# phase => atan2
# setitem => full_default, index_put
# Graph fragment:
# %atan2 : [num_users=2] = call_function[target=torch.ops.aten.atan2.default](args = (%squeeze_1, %squeeze), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%atan2, [%ne], %full_default), kwargs = {})
triton_poi_fused_atan2_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_atan2_index_put_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_atan2_index_put_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_atan2_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = libdevice.atan2(tmp0, tmp1)
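    # NaN guard: a float is NaN iff it compares unequal to itself, mirroring
    # phase[phase.ne(phase)] = 0.0 in the Python module.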
tmp3 = tmp2 != tmp2
tmp4 = 0.0
tmp5 = tl.where(tmp3, tmp4, tmp2)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wh/cwh3pilr24tt4wzehv4kii4m2ehr4pwbvjawqndhgx5gjo2ympl5.py
# Topologically Sorted Source Nodes: [x, setitem_1], Original ATen: [aten.stack, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem_1 => full_default_1, index_put_1
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], 1), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_1 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%cat, [%ne_1], %full_default_1), kwargs = {})
triton_poi_fused_index_put_lift_fresh_stack_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_stack_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_stack_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_stack_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp5 * tmp5
tmp7 = tl.load(in_ptr0 + (1 + (4*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tmp0 >= tmp3
tmp14 = tl.full([1], 2, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tl.load(in_ptr1 + (x1), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.where(tmp4, tmp12, tmp16)
tmp18 = tmp17 != tmp17
tmp19 = 0.0
tmp20 = tl.where(tmp18, tmp19, tmp17)
tl.store(out_ptr0 + (x2), tmp20, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bm/cbm36xvh2kbgn5p5eb6lqhj2dneqtr2zmw22ow42nd34c4vqxrka.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# x_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_2, %unsqueeze_3], 1), kwargs = {})
triton_poi_fused_stack_2 = async_compile.triton('triton_poi_fused_stack_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
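# Rebuilds the Cartesian pair from (mag, phase): channel 0 gets mag * cos(phase)
# and channel 1 gets mag * sin(phase).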
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (2*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl_math.cos(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 2, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr0 + (2*x1), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (1 + (2*x1)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl_math.sin(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + (x2), tmp20, xmask)
''', device_str='cuda')
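# Note: triton_poi_fused_stack_2 is the polar -> cylindrical step: for each
# (mag, phase) pair it writes mag * cos(phase) to lane 0 and mag * sin(phase)
# to lane 1, matching `torch.stack((real, imag), ...)` in the source module.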
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
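        # Pipeline (reading columns 0 and 1 of each input row as real/imag):
        #   buf0 (4,):   phase = atan2(imag, real), NaN -> 0
        #   buf1 (4, 2): [magnitude, phase] stacked along the last dim
        #   buf2 (4, 2): back to [real, imag] via cos/sin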
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [phase, setitem], Original ATen: [aten.atan2, aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused_atan2_index_put_lift_fresh_0.run(arg0_1, buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, setitem_1], Original ATen: [aten.stack, aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_stack_1.run(arg0_1, buf0, buf1, 8, grid=grid(8), stream=stream0)
del arg0_1
del buf0
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.stack]
triton_poi_fused_stack_2.run(buf1, buf2, 8, grid=grid(8), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def cylindricalToPolarConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both real and imaginary channels
in the last dims"""
ndims = input1.ndimension()
real_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
imag_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
mag = (real_input ** 2 + imag_input ** 2) ** 0.5
phase = torch.atan2(imag_input, real_input)
phase[phase.ne(phase)] = 0.0
return torch.stack((mag, phase), dim=input1.ndimension() - 1)
else:
"""input1 is real part and input2 is imaginary part; both of size [B,C,H,W,D]"""
mag = (input1 ** 2 + input2 ** 2) ** 0.5
phase = torch.atan2(input2, input1)
phase[phase.ne(phase)] = 0.0
return mag, phase
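# Example (illustrative): for the pair (re, im) = (1.0, 1.0) this returns
# mag = sqrt(2) ~ 1.4142 and phase = atan2(1, 1) = pi / 4 ~ 0.7854.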
def polarToCylindricalConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both magnitude and phase channels
in the last dims"""
ndims = input1.ndimension()
mag_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
phase_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
real = mag_input * torch.cos(phase_input)
imag = mag_input * torch.sin(phase_input)
return torch.stack((real, imag), dim=input1.ndimension() - 1)
else:
"""input1 is magnitude part and input2 is phase part; both of size [B,C,H,W,D]"""
real = input1 * torch.cos(input2)
imag = input1 * torch.sin(input2)
return real, imag
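# Example (illustrative): mag = sqrt(2), phase = pi / 4 maps back to
# real = sqrt(2) * cos(pi / 4) = 1.0 and imag = sqrt(2) * sin(pi / 4) = 1.0,
# so the two conversions are inverses up to floating-point error.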
def normalizeComplexBatch_byMagnitudeOnly(x, polar=False):
""" normalize the complex batch by making the magnitude of mean 1 and std 1, and keep the phase as it is"""
ndims = x.ndimension()
shift_mean = 1
if not polar:
x = cylindricalToPolarConversion(x)
    if ndims == 4:
        mag = x[:, :, :, 0]
        mdims = mag.ndimension()
        mag_shaped = mag.reshape((mag.shape[0], mag.shape[1], mag.shape[2]))
        # stats over the last dim, kept as (B, C, 1) so they broadcast with mag
        mean = torch.mean(mag_shaped, mdims - 1, keepdim=True)
        std = torch.std(mag_shaped, mdims - 1, keepdim=True)
        normalized_mag = (mag - mean) / std + shift_mean
        x = torch.stack([normalized_mag, x[:, :, :, 1]], dim=3)
    elif ndims == 5:
        mag = x[:, :, :, :, 0]
        mdims = mag.ndimension()
        mag_shaped = mag.reshape((mag.shape[0], mag.shape[1],
            mag.shape[2] * mag.shape[3]))
        # stats shaped (B, C, 1, 1) so they broadcast with mag (B, C, H, W)
        mean = torch.mean(mag_shaped, mdims - 2, keepdim=True).unsqueeze(mdims - 1)
        std = torch.std(mag_shaped, mdims - 2, keepdim=True).unsqueeze(mdims - 1)
        normalized_mag = (mag - mean) / std + shift_mean
        x = torch.stack([normalized_mag, x[:, :, :, :, 1]], dim=4)
    elif ndims == 6:
        mag = x[:, :, :, :, :, 0]
        mdims = mag.ndimension()
        mag_shaped = mag.reshape((mag.shape[0], mag.shape[1],
            mag.shape[2] * mag.shape[3] * mag.shape[4]))
        # stats shaped (B, C, 1, 1, 1) so they broadcast with mag (B, C, H, W, D)
        mean = torch.mean(mag_shaped, mdims - 3, keepdim=True
            ).unsqueeze(mdims - 2).unsqueeze(mdims - 1)
        std = torch.std(mag_shaped, mdims - 3, keepdim=True
            ).unsqueeze(mdims - 2).unsqueeze(mdims - 1)
        normalized_mag = (mag - mean) / std + shift_mean
        x = torch.stack([normalized_mag, x[:, :, :, :, :, 1]], dim=5)
x[x.ne(x)] = 0.0
if not polar:
x = polarToCylindricalConversion(x)
return x
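# Minimal usage sketch (illustrative; the shapes are made up, assuming a 5-D
# complex batch with real/imag stacked in the last dim):
#
#     x = torch.randn(2, 3, 8, 8, 2)
#     y = normalizeComplexBatch_byMagnitudeOnly(x)  # magnitude ~ mean 1, std 1
#     assert y.shape == x.shape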
class ComplexBatchNormalize(nn.Module):
def __init__(self):
super(ComplexBatchNormalize, self).__init__()
def forward(self, input):
return normalizeComplexBatch_byMagnitudeOnly(input)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_atan2_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = libdevice.atan2(tmp0, tmp1)
tmp3 = tmp2 != tmp2
tmp4 = 0.0
tmp5 = tl.where(tmp3, tmp4, tmp2)
tl.store(out_ptr0 + x0, tmp5, xmask)
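# The kernel above treats elements 0 and 1 of each 4-element input row as
# (real, imag) and stores atan2(imag, real), replacing NaN with 0.0.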
@triton.jit
def triton_poi_fused_index_put_lift_fresh_stack_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp5 * tmp5
tmp7 = tl.load(in_ptr0 + (1 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp16 = tl.load(in_ptr1 + x1, tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tl.where(tmp4, tmp12, tmp16)
tmp18 = tmp17 != tmp17
tmp19 = 0.0
tmp20 = tl.where(tmp18, tmp19, tmp17)
tl.store(out_ptr0 + x2, tmp20, xmask)
@triton.jit
def triton_poi_fused_stack_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 2 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 2 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl_math.cos(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp14 = tl.load(in_ptr0 + 2 * x1, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (1 + 2 * x1), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tl_math.sin(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_atan2_index_put_lift_fresh_0[grid(4)](arg0_1, buf0,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
triton_poi_fused_index_put_lift_fresh_stack_1[grid(8)](arg0_1, buf0,
buf1, 8, XBLOCK=8, num_warps=1, num_stages=1)
del arg0_1
del buf0
buf2 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
triton_poi_fused_stack_2[grid(8)](buf1, buf2, 8, XBLOCK=8,
num_warps=1, num_stages=1)
del buf1
return buf2,
def cylindricalToPolarConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both real and imaginary channels
in the last dims"""
ndims = input1.ndimension()
real_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
imag_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
mag = (real_input ** 2 + imag_input ** 2) ** 0.5
phase = torch.atan2(imag_input, real_input)
phase[phase.ne(phase)] = 0.0
return torch.stack((mag, phase), dim=input1.ndimension() - 1)
else:
"""input1 is real part and input2 is imaginary part; both of size [B,C,H,W,D]"""
mag = (input1 ** 2 + input2 ** 2) ** 0.5
phase = torch.atan2(input2, input1)
phase[phase.ne(phase)] = 0.0
return mag, phase
def polarToCylindricalConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both magnitude and phase channels
in the last dims"""
ndims = input1.ndimension()
mag_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
phase_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
real = mag_input * torch.cos(phase_input)
imag = mag_input * torch.sin(phase_input)
return torch.stack((real, imag), dim=input1.ndimension() - 1)
else:
"""input1 is magnitude part and input2 is phase part; both of size [B,C,H,W,D]"""
real = input1 * torch.cos(input2)
imag = input1 * torch.sin(input2)
return real, imag
def normalizeComplexBatch_byMagnitudeOnly(x, polar=False):
""" normalize the complex batch by making the magnitude of mean 1 and std 1, and keep the phase as it is"""
ndims = x.ndimension()
shift_mean = 1
if not polar:
x = cylindricalToPolarConversion(x)
    if ndims == 4:
        mag = x[:, :, :, 0]
        mdims = mag.ndimension()
        mag_shaped = mag.reshape((mag.shape[0], mag.shape[1], mag.shape[2]))
        # stats over the last dim, kept as (B, C, 1) so they broadcast with mag
        mean = torch.mean(mag_shaped, mdims - 1, keepdim=True)
        std = torch.std(mag_shaped, mdims - 1, keepdim=True)
        normalized_mag = (mag - mean) / std + shift_mean
        x = torch.stack([normalized_mag, x[:, :, :, 1]], dim=3)
    elif ndims == 5:
        mag = x[:, :, :, :, 0]
        mdims = mag.ndimension()
        mag_shaped = mag.reshape((mag.shape[0], mag.shape[1],
            mag.shape[2] * mag.shape[3]))
        # stats shaped (B, C, 1, 1) so they broadcast with mag (B, C, H, W)
        mean = torch.mean(mag_shaped, mdims - 2, keepdim=True).unsqueeze(mdims - 1)
        std = torch.std(mag_shaped, mdims - 2, keepdim=True).unsqueeze(mdims - 1)
        normalized_mag = (mag - mean) / std + shift_mean
        x = torch.stack([normalized_mag, x[:, :, :, :, 1]], dim=4)
    elif ndims == 6:
        mag = x[:, :, :, :, :, 0]
        mdims = mag.ndimension()
        mag_shaped = mag.reshape((mag.shape[0], mag.shape[1],
            mag.shape[2] * mag.shape[3] * mag.shape[4]))
        # stats shaped (B, C, 1, 1, 1) so they broadcast with mag (B, C, H, W, D)
        mean = torch.mean(mag_shaped, mdims - 3, keepdim=True
            ).unsqueeze(mdims - 2).unsqueeze(mdims - 1)
        std = torch.std(mag_shaped, mdims - 3, keepdim=True
            ).unsqueeze(mdims - 2).unsqueeze(mdims - 1)
        normalized_mag = (mag - mean) / std + shift_mean
        x = torch.stack([normalized_mag, x[:, :, :, :, :, 1]], dim=5)
x[x.ne(x)] = 0.0
if not polar:
x = polarToCylindricalConversion(x)
return x
class ComplexBatchNormalizeNew(nn.Module):
def __init__(self):
super(ComplexBatchNormalizeNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| wizofe/urus-mri-recon | ComplexBatchNormalize | false | 4,535 | [
"MIT"
] | 0 | eab8e48dca31d2b936ce69ccc251ec5a4a10facc | https://github.com/wizofe/urus-mri-recon/tree/eab8e48dca31d2b936ce69ccc251ec5a4a10facc | import torch
import torch.nn as nn
def cylindricalToPolarConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both real and imaginary channels
in the last dims"""
ndims = input1.ndimension()
real_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
imag_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
mag = (real_input ** 2 + imag_input ** 2) ** 0.5
phase = torch.atan2(imag_input, real_input)
phase[phase.ne(phase)] = 0.0
return torch.stack((mag, phase), dim=input1.ndimension() - 1)
else:
"""input1 is real part and input2 is imaginary part; both of size [B,C,H,W,D]"""
mag = (input1 ** 2 + input2 ** 2) ** 0.5
phase = torch.atan2(input2, input1)
phase[phase.ne(phase)] = 0.0
return mag, phase
def polarToCylindricalConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both magnitude and phase channels
in the last dims"""
ndims = input1.ndimension()
mag_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
phase_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
real = mag_input * torch.cos(phase_input)
imag = mag_input * torch.sin(phase_input)
return torch.stack((real, imag), dim=input1.ndimension() - 1)
else:
"""input1 is magnitude part and input2 is phase part; both of size [B,C,H,W,D]"""
real = input1 * torch.cos(input2)
imag = input1 * torch.sin(input2)
return real, imag
def normalizeComplexBatch_byMagnitudeOnly(x, polar=False):
""" normalize the complex batch by making the magnitude of mean 1 and std 1, and keep the phase as it is"""
ndims = x.ndimension()
shift_mean = 1
if not polar:
x = cylindricalToPolarConversion(x)
    if ndims == 4:
        mag = x[:, :, :, 0]
        mdims = mag.ndimension()
        mag_shaped = mag.reshape((mag.shape[0], mag.shape[1], mag.shape[2]))
        # stats over the last dim, kept as (B, C, 1) so they broadcast with mag
        mean = torch.mean(mag_shaped, mdims - 1, keepdim=True)
        std = torch.std(mag_shaped, mdims - 1, keepdim=True)
        normalized_mag = (mag - mean) / std + shift_mean
        x = torch.stack([normalized_mag, x[:, :, :, 1]], dim=3)
    elif ndims == 5:
        mag = x[:, :, :, :, 0]
        mdims = mag.ndimension()
        mag_shaped = mag.reshape((mag.shape[0], mag.shape[1],
            mag.shape[2] * mag.shape[3]))
        # stats shaped (B, C, 1, 1) so they broadcast with mag (B, C, H, W)
        mean = torch.mean(mag_shaped, mdims - 2, keepdim=True).unsqueeze(mdims - 1)
        std = torch.std(mag_shaped, mdims - 2, keepdim=True).unsqueeze(mdims - 1)
        normalized_mag = (mag - mean) / std + shift_mean
        x = torch.stack([normalized_mag, x[:, :, :, :, 1]], dim=4)
    elif ndims == 6:
        mag = x[:, :, :, :, :, 0]
        mdims = mag.ndimension()
        mag_shaped = mag.reshape((mag.shape[0], mag.shape[1],
            mag.shape[2] * mag.shape[3] * mag.shape[4]))
        # stats shaped (B, C, 1, 1, 1) so they broadcast with mag (B, C, H, W, D)
        mean = torch.mean(mag_shaped, mdims - 3, keepdim=True
            ).unsqueeze(mdims - 2).unsqueeze(mdims - 1)
        std = torch.std(mag_shaped, mdims - 3, keepdim=True
            ).unsqueeze(mdims - 2).unsqueeze(mdims - 1)
        normalized_mag = (mag - mean) / std + shift_mean
        x = torch.stack([normalized_mag, x[:, :, :, :, :, 1]], dim=5)
x[x.ne(x)] = 0.0
if not polar:
x = polarToCylindricalConversion(x)
return x
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return normalizeComplexBatch_byMagnitudeOnly(input)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return []
|
Channel_mean | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ll/cllr35pbrslqfjtruvdn2o7c6ryc4bnrvsj4b65rxcqq5miw7mfh.py
# Topologically Sorted Source Nodes: [sum_1, squeeze], Original ATen: [aten.sum, aten.squeeze]
# Source node to ATen node mapping:
# squeeze => squeeze
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%select, [0]), kwargs = {})
# %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.default](args = (%sum_1,), kwargs = {})
triton_poi_fused_squeeze_sum_0 = async_compile.triton('triton_poi_fused_squeeze_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_squeeze_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_squeeze_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
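# Note: with a contiguous (4, 4, 4, 4) input, V[0] is the first (4, 4, 4)
# slice; the kernel sums its four dim-0 planes by loading the 16-element
# blocks at offsets 0, 16, 32 and 48 and adding them elementwise.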
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, squeeze], Original ATen: [aten.sum, aten.squeeze]
stream0 = get_raw_stream(0)
triton_poi_fused_squeeze_sum_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Channel_mean(nn.Module):
    def __init__(self) -> None:
super().__init__()
def forward(self, V):
"""
        Only V[0] is used: sum it over dim 0 and squeeze the result.
"""
return torch.sum(V[0], dim=0).squeeze()
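# Example (illustrative): for V of shape [4, 4, 4, 4] the result is the
# [4, 4] elementwise sum V[0][0] + V[0][1] + V[0][2] + V[0][3].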
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_squeeze_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_squeeze_sum_0[grid(16)](arg0_1, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class Channel_meanNew(nn.Module):
    def __init__(self) -> None:
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| wk989898/ARES-implement | Channel_mean | false | 4,536 | [
"MIT"
] | 0 | b2411be01124feaccbc89d74f6025fbfa584bb3f | https://github.com/wk989898/ARES-implement/tree/b2411be01124feaccbc89d74f6025fbfa584bb3f | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self) ->None:
super().__init__()
def forward(self, V):
"""
only V[0]
"""
return torch.sum(V[0], dim=0).squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MnistFeatureExtractor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lj/clj3mscf4tqz7wlbw6be5v4cyjekfwhnnfmmz6ngsoi4fvpbcy27.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3600) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ne/cneqdxjdk5oiyfj25d72xf36akzgouwh67fknxl3rdhbdhnqrqj7.py
# Topologically Sorted Source Nodes: [max_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.leaky_relu]
# Source node to ATen node mapping:
# max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1
# x => gt, mul, where
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%getitem, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %getitem, %mul), kwargs = {})
triton_poi_fused_leaky_relu_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_leaky_relu_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x3 = (xindex // 30)
x2 = (xindex // 9000)
x4 = xindex % 9000
x5 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.01
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(out_ptr0 + (x4 + (9088*x2)), tmp15, xmask)
tl.store(out_ptr1 + (x5), tmp18, xmask)
tl.store(out_ptr2 + (x5), tmp21, xmask)
''', device_str='cuda')
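# Note: the fused kernel above performs a 2x2/stride-2 max pool followed by
# leaky ReLU (negative slope 0.01) on the pooled value; it also stores the
# int8 argmax index (0..3 within each window) and the boolean x > 0 mask,
# both of which are kept for the backward pass.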
# kernel path: runs/run_shard_7/inductor_cache/aw/cawzukwabtkzagdqmtxehml5xtoaxgeoa4r6pkdmncwtapog2lg5.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 54080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 676) % 20
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tk/ctkofe4daqi7tsia4n5ahb4h5mao5uffyrmy3tl6ymyz6ry6rquu.py
# Topologically Sorted Source Nodes: [max_pool2d_1, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.leaky_relu]
# Source node to ATen node mapping:
# max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%getitem_2, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_2, 0.01), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %getitem_2, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_leaky_relu_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 13520
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 13
x3 = (xindex // 13)
x2 = (xindex // 3380)
x4 = xindex % 3380
x5 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (52*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (26 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (27 + (2*x0) + (52*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.01
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(out_ptr0 + (x4 + (3456*x2)), tmp15, xmask)
tl.store(out_ptr1 + (x4 + (3456*x2)), tmp18, xmask)
tl.store(out_ptr2 + (x5), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (10, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (10, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (20, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 144000, grid=grid(144000), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 10, 30, 30), (9088, 900, 30, 1), torch.int8)
buf3 = empty_strided_cuda((4, 10, 30, 30), (9000, 900, 30, 1), torch.bool)
buf4 = empty_strided_cuda((4, 10, 30, 30), (9000, 900, 30, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.leaky_relu]
triton_poi_fused_leaky_relu_max_pool2d_with_indices_1.run(buf1, buf2, buf3, buf4, 36000, grid=grid(36000), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 20, 26, 26), (13520, 676, 26, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf6, primals_5, 54080, grid=grid(54080), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((4, 20, 13, 13), (3456, 169, 13, 1), torch.int8)
buf8 = empty_strided_cuda((4, 20, 13, 13), (3456, 169, 13, 1), torch.bool)
buf9 = empty_strided_cuda((4, 20, 13, 13), (3380, 169, 13, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d_1, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.leaky_relu]
triton_poi_fused_leaky_relu_max_pool2d_with_indices_3.run(buf6, buf7, buf8, buf9, 13520, grid=grid(13520), stream=stream0)
return (reinterpret_tensor(buf9, (4, 3380), (3380, 1), 0), primals_1, primals_3, primals_4, buf1, buf2, buf3, buf4, buf6, buf7, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((10, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 10, 5, 5), (250, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MnistFeatureExtractor(nn.Module):
def __init__(self, activation=F.leaky_relu):
super(MnistFeatureExtractor, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.activation = activation
def get_mtx(self):
return None
def forward(self, x):
x = self.activation(F.max_pool2d(self.conv1(x), 2))
x = self.activation(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
return x.view(x.size(0), -1)
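# Shape check for the 3x64x64 test input: conv1 (k=5) maps 64 -> 60, pooling
# halves it to 30, conv2 (k=5) maps 30 -> 26, pooling halves it to 13, so the
# flattened feature vector has 20 * 13 * 13 = 3380 elements per sample.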
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_max_pool2d_with_indices_1(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x3 = xindex // 30
x2 = xindex // 9000
x4 = xindex % 9000
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.01
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(out_ptr0 + (x4 + 9088 * x2), tmp15, xmask)
tl.store(out_ptr1 + x5, tmp18, xmask)
tl.store(out_ptr2 + x5, tmp21, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 54080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 676 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_max_pool2d_with_indices_3(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 13520
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 13
x3 = xindex // 13
x2 = xindex // 3380
x4 = xindex % 3380
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 52 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 52 * x3), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (26 + 2 * x0 + 52 * x3), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (27 + 2 * x0 + 52 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.01
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(out_ptr0 + (x4 + 3456 * x2), tmp15, xmask)
tl.store(out_ptr1 + (x4 + 3456 * x2), tmp18, xmask)
tl.store(out_ptr2 + x5, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (10, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 5, 5), (250, 25, 5, 1))
assert_size_stride(primals_5, (20,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(144000)](buf1, primals_2,
144000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 10, 30, 30), (9088, 900, 30, 1),
torch.int8)
buf3 = empty_strided_cuda((4, 10, 30, 30), (9000, 900, 30, 1),
torch.bool)
buf4 = empty_strided_cuda((4, 10, 30, 30), (9000, 900, 30, 1),
torch.float32)
triton_poi_fused_leaky_relu_max_pool2d_with_indices_1[grid(36000)](buf1
, buf2, buf3, buf4, 36000, XBLOCK=256, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 20, 26, 26), (13520, 676, 26, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_2[grid(54080)](buf6, primals_5, 54080,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 20, 13, 13), (3456, 169, 13, 1),
torch.int8)
buf8 = empty_strided_cuda((4, 20, 13, 13), (3456, 169, 13, 1),
torch.bool)
buf9 = empty_strided_cuda((4, 20, 13, 13), (3380, 169, 13, 1),
torch.float32)
triton_poi_fused_leaky_relu_max_pool2d_with_indices_3[grid(13520)](buf6
, buf7, buf8, buf9, 13520, XBLOCK=256, num_warps=4, num_stages=1)
return (reinterpret_tensor(buf9, (4, 3380), (3380, 1), 0), primals_1,
primals_3, primals_4, buf1, buf2, buf3, buf4, buf6, buf7, buf8)
class MnistFeatureExtractorNew(nn.Module):
def __init__(self, activation=F.leaky_relu):
super(MnistFeatureExtractorNew, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.activation = activation
def get_mtx(self):
return None
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| wiatrak2/BScThesis | MnistFeatureExtractor | false | 4,537 | [
"MIT"
] | 0 | e5dd012fd9052e7088d8464b409dc055dbfcf840 | https://github.com/wiatrak2/BScThesis/tree/e5dd012fd9052e7088d8464b409dc055dbfcf840 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, activation=F.leaky_relu):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.activation = activation
def get_mtx(self):
return None
def forward(self, x):
x = self.activation(F.max_pool2d(self.conv1(x), 2))
x = self.activation(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
return x.view(x.size(0), -1)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
AspectMean | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/2l/c2lskgt456tssjss33age2ge3rxkx423h7s6kanacrjo7kblt4m4.py
# Topologically Sorted Source Nodes: [ne, len_tmp, ne_1, sum_2], Original ATen: [aten.ne, aten.sum]
# Source node to ATen node mapping:
# len_tmp => sum_1
# ne => ne
# ne_1 => ne_1
# sum_2 => sum_2
# Graph fragment:
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg0_1, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%ne, [2]), kwargs = {})
# %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%sum_1, 0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%ne_1, [1]), kwargs = {})
triton_poi_fused_ne_sum_0 = async_compile.triton('triton_poi_fused_ne_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ne_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_ne_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp55 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp62 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp66 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int64)
tmp5 = tmp4 != tmp1
tmp6 = tmp5.to(tl.int64)
tmp7 = tmp3 + tmp6
tmp9 = tmp8 != tmp1
tmp10 = tmp9.to(tl.int64)
tmp11 = tmp7 + tmp10
tmp13 = tmp12 != tmp1
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp11 + tmp14
tmp16 = tl.full([1], 0, tl.int64)
tmp17 = tmp15 != tmp16
tmp18 = tmp17.to(tl.int64)
tmp20 = tmp19 != tmp1
tmp21 = tmp20.to(tl.int64)
tmp23 = tmp22 != tmp1
tmp24 = tmp23.to(tl.int64)
tmp25 = tmp21 + tmp24
tmp27 = tmp26 != tmp1
tmp28 = tmp27.to(tl.int64)
tmp29 = tmp25 + tmp28
tmp31 = tmp30 != tmp1
tmp32 = tmp31.to(tl.int64)
tmp33 = tmp29 + tmp32
tmp34 = tmp33 != tmp16
tmp35 = tmp34.to(tl.int64)
tmp36 = tmp18 + tmp35
tmp38 = tmp37 != tmp1
tmp39 = tmp38.to(tl.int64)
tmp41 = tmp40 != tmp1
tmp42 = tmp41.to(tl.int64)
tmp43 = tmp39 + tmp42
tmp45 = tmp44 != tmp1
tmp46 = tmp45.to(tl.int64)
tmp47 = tmp43 + tmp46
tmp49 = tmp48 != tmp1
tmp50 = tmp49.to(tl.int64)
tmp51 = tmp47 + tmp50
tmp52 = tmp51 != tmp16
tmp53 = tmp52.to(tl.int64)
tmp54 = tmp36 + tmp53
tmp56 = tmp55 != tmp1
tmp57 = tmp56.to(tl.int64)
tmp59 = tmp58 != tmp1
tmp60 = tmp59.to(tl.int64)
tmp61 = tmp57 + tmp60
tmp63 = tmp62 != tmp1
tmp64 = tmp63.to(tl.int64)
tmp65 = tmp61 + tmp64
tmp67 = tmp66 != tmp1
tmp68 = tmp67.to(tl.int64)
tmp69 = tmp65 + tmp68
tmp70 = tmp69 != tmp16
tmp71 = tmp70.to(tl.int64)
tmp72 = tmp54 + tmp71
tl.store(out_ptr0 + (x0), tmp72, xmask)
''', device_str='cuda')
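# Note: the kernel above fuses both reductions: for each of the 4 batch rows
# it counts how many of the 4 positions have an embedding with at least one
# nonzero entry -- i.e. the unpadded aspect length used as the divisor below.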
# kernel path: runs/run_shard_7/inductor_cache/yw/cyweluarz5kplvpgbfpvpwel67ohfhi5kugx7pjyaikjat2lg4ps.py
# Topologically Sorted Source Nodes: [out, aspect_len, div], Original ATen: [aten.sum, aten._to_copy, aten.div]
# Source node to ATen node mapping:
# aspect_len => convert_element_type
# div => div
# out => sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1]), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%unsqueeze, torch.float32), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %convert_element_type), kwargs = {})
triton_poi_fused__to_copy_div_sum_1 = async_compile.triton('triton_poi_fused__to_copy_div_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_div_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_div_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp6 / tmp8
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
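# Note: this kernel sums the four position embeddings of each batch row and
# divides by the aspect length from the previous kernel; the wrapper then
# expands the (4, 4) mean to (4, 4, 4) with a stride-0 view (no copy).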
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [ne, len_tmp, ne_1, sum_2], Original ATen: [aten.ne, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_ne_sum_0.run(arg0_1, buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out, aspect_len, div], Original ATen: [aten.sum, aten._to_copy, aten.div]
triton_poi_fused__to_copy_div_sum_1.run(arg0_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
del arg0_1
del buf0
return (reinterpret_tensor(buf1, (4, 4, 4), (4, 0, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AspectMean(nn.Module):
def __init__(self, max_sen_len):
"""
:param max_sen_len: maximum length of sentence
"""
super(AspectMean, self).__init__()
self.max_sen_len = max_sen_len
def forward(self, aspect):
"""
:param aspect: size: [batch_size, max_asp_len, embed_size]
:return: aspect mean embedding, size: [batch_size, max_sen_len, embed_size]
"""
len_tmp = torch.sum(aspect != 0, dim=2)
aspect_len = torch.sum(len_tmp != 0, dim=1).unsqueeze(dim=1).float()
out = aspect.sum(dim=1)
out = out.div(aspect_len).unsqueeze(dim=1).expand(-1, self.
max_sen_len, -1)
return out
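# A minimal plain-PyTorch reference for the fused kernels in this record
# (editorial sketch; `_aspect_mean_reference` is a hypothetical helper, not
# part of the original repo).
def _aspect_mean_reference(aspect, max_sen_len):
    # A padding token is an all-zero embedding row, so it counts toward the
    # aspect length only if at least one embedding entry is non-zero.
    token_nonzero = torch.sum(aspect != 0, dim=2)
    aspect_len = torch.sum(token_nonzero != 0, dim=1, keepdim=True).float()
    mean = aspect.sum(dim=1) / aspect_len
    return mean.unsqueeze(dim=1).expand(-1, max_sen_len, -1)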
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'max_sen_len': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_ne_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
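    # Fully unrolled count of non-empty aspect tokens per batch row: each
    # group of four loads below is one token's 4-dim embedding, reduced with
    # two `!= 0` tests exactly like `len_tmp`/`aspect_len` in
    # AspectMean.forward.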
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp37 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp40 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp44 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp55 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp58 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp62 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp66 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 != tmp1
tmp3 = tmp2.to(tl.int64)
tmp5 = tmp4 != tmp1
tmp6 = tmp5.to(tl.int64)
tmp7 = tmp3 + tmp6
tmp9 = tmp8 != tmp1
tmp10 = tmp9.to(tl.int64)
tmp11 = tmp7 + tmp10
tmp13 = tmp12 != tmp1
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp11 + tmp14
tmp16 = tl.full([1], 0, tl.int64)
tmp17 = tmp15 != tmp16
tmp18 = tmp17.to(tl.int64)
tmp20 = tmp19 != tmp1
tmp21 = tmp20.to(tl.int64)
tmp23 = tmp22 != tmp1
tmp24 = tmp23.to(tl.int64)
tmp25 = tmp21 + tmp24
tmp27 = tmp26 != tmp1
tmp28 = tmp27.to(tl.int64)
tmp29 = tmp25 + tmp28
tmp31 = tmp30 != tmp1
tmp32 = tmp31.to(tl.int64)
tmp33 = tmp29 + tmp32
tmp34 = tmp33 != tmp16
tmp35 = tmp34.to(tl.int64)
tmp36 = tmp18 + tmp35
tmp38 = tmp37 != tmp1
tmp39 = tmp38.to(tl.int64)
tmp41 = tmp40 != tmp1
tmp42 = tmp41.to(tl.int64)
tmp43 = tmp39 + tmp42
tmp45 = tmp44 != tmp1
tmp46 = tmp45.to(tl.int64)
tmp47 = tmp43 + tmp46
tmp49 = tmp48 != tmp1
tmp50 = tmp49.to(tl.int64)
tmp51 = tmp47 + tmp50
tmp52 = tmp51 != tmp16
tmp53 = tmp52.to(tl.int64)
tmp54 = tmp36 + tmp53
tmp56 = tmp55 != tmp1
tmp57 = tmp56.to(tl.int64)
tmp59 = tmp58 != tmp1
tmp60 = tmp59.to(tl.int64)
tmp61 = tmp57 + tmp60
tmp63 = tmp62 != tmp1
tmp64 = tmp63.to(tl.int64)
tmp65 = tmp61 + tmp64
tmp67 = tmp66 != tmp1
tmp68 = tmp67.to(tl.int64)
tmp69 = tmp65 + tmp68
tmp70 = tmp69 != tmp16
tmp71 = tmp70.to(tl.int64)
tmp72 = tmp54 + tmp71
tl.store(out_ptr0 + x0, tmp72, xmask)
@triton.jit
def triton_poi_fused__to_copy_div_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
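    # Sums the 4 aspect tokens per (batch, embed) position (dim=1 of the
    # [4, 4, 4] input) and divides by the token count produced by the kernel
    # above, i.e. the fused `aspect.sum(dim=1) / aspect_len` step.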
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp6 / tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_ne_sum_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__to_copy_div_sum_1[grid(16)](arg0_1, buf0, buf1,
16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del buf0
return reinterpret_tensor(buf1, (4, 4, 4), (4, 0, 1), 0),
class AspectMeanNew(nn.Module):
def __init__(self, max_sen_len):
"""
:param max_sen_len: maximum length of sentence
"""
super(AspectMeanNew, self).__init__()
self.max_sen_len = max_sen_len
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| williamSYSU/ABSA-william | AspectMean | false | 4,538 | [
"MIT"
] | 0 | 84ccd3dca00e84c7fefadb9f5835216b2c4fe1df | https://github.com/williamSYSU/ABSA-william/tree/84ccd3dca00e84c7fefadb9f5835216b2c4fe1df | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, max_sen_len):
"""
:param max_sen_len: maximum length of sentence
"""
super().__init__()
self.max_sen_len = max_sen_len
def forward(self, aspect):
"""
:param aspect: size: [batch_size, max_asp_len, embed_size]
:return: aspect mean embedding, size: [batch_size, max_sen_len, embed_size]
"""
len_tmp = torch.sum(aspect != 0, dim=2)
aspect_len = torch.sum(len_tmp != 0, dim=1).unsqueeze(dim=1).float()
out = aspect.sum(dim=1)
out = out.div(aspect_len).unsqueeze(dim=1).expand(-1, self.
max_sen_len, -1)
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
ComplexConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/75/c75kmue6qd44hzaqpn7rrmrp5xm7szydl3qiglyxzw3rfrnp6sdi.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# output => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_4, %unsqueeze_5], 3), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr2 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tl.load(in_ptr3 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tmp7 - tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 2, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr4 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr5 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + (x2), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 0), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 1, 1), (4, 1, 1, 1))
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 1), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 1, 1), (4, 1, 1, 1))
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 1), primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 1, 1), (4, 1, 1, 1))
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4, 4), (0, 64, 16, 4), 0), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 1, 1), (4, 1, 1, 1))
buf4 = empty_strided_cuda((4, 1, 1, 2), (2, 2, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(buf0, primals_3, buf1, primals_5, buf2, buf3, buf4, 8, grid=grid(8), stream=stream0)
del buf0
del buf1
del buf2
del buf3
del primals_3
del primals_5
return (buf4, primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4, 4), (256, 64, 16, 4), 0), reinterpret_tensor(primals_1, (1, 4, 4, 4), (256, 64, 16, 4), 1), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ComplexConv(nn.Module):
def __init__(self, rank, in_channels, out_channels, kernel_size, stride
=1, padding=0, output_padding=0, dilation=1, groups=1, bias=True,
normalize_weight=False, epsilon=1e-07, conv_transposed=False):
super(ComplexConv, self).__init__()
self.rank = rank
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.conv_transposed = conv_transposed
self.conv_args = {'in_channels': self.in_channels, 'out_channels':
self.out_channels, 'kernel_size': self.kernel_size, 'stride':
self.stride, 'padding': self.padding, 'groups': self.groups,
'bias': self.bias}
if self.conv_transposed:
self.conv_args['output_padding'] = self.output_padding
else:
self.conv_args['dilation'] = self.dilation
self.conv_func = {(1): nn.Conv1d if not self.conv_transposed else
nn.ConvTranspose1d, (2): nn.Conv2d if not self.conv_transposed else
nn.ConvTranspose2d, (3): nn.Conv3d if not self.conv_transposed else
nn.ConvTranspose3d}[self.rank]
self.real_conv = self.conv_func(**self.conv_args)
self.imag_conv = self.conv_func(**self.conv_args)
def forward(self, input):
"""
        assume a complex input z = x + iy needs to be convolved with a complex filter h = a + ib,
        where the output O = z * h and * is the convolution operator; then O = (x*a - y*b) + i(x*b + y*a),
        so we need to calculate each of the 4 convolution operations in the previous equation.
        One simple way to implement this is as two convolution layers, one layer for the real weights (a)
        and the other for the imaginary weights (b); this can be done by concatenating both real and imaginary
        parts of the input and convolving over both of them as follows:
        c_r = [x; y] * a, and c_i = [x; y] * b, so that
        O_real = c_r[real] - c_i[imag], and O_imag = c_r[imag] + c_i[real]
"""
ndims = input.ndimension()
input_real = input.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
input_imag = input.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
output_real = self.real_conv(input_real) - self.imag_conv(input_imag)
output_imag = self.real_conv(input_imag) + self.imag_conv(input_real)
output = torch.stack([output_real, output_imag], dim=ndims - 1)
return output
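# The recombination rule the forward pass implements, written for plain
# tensors instead of convolutions (editorial sketch; `_complex_product_parts`
# is a hypothetical helper, not part of the original repo).
def _complex_product_parts(x, y, a, b):
    # (x + iy) * (a + ib) = (x*a - y*b) + i(x*b + y*a)
    return x * a - y * b, x * b + y * a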
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'rank': 2, 'in_channels': 4, 'out_channels': 4,
'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
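    # Recombines the four convolution outputs (plus biases) into a trailing
    # real/imag axis of size 2: even lanes (x0 == 0) take the difference
    # branch for the real part, odd lanes the sum branch for the imaginary
    # part.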
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr2 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp9 = tl.load(in_ptr3 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tmp7 - tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp17 = tl.load(in_ptr4 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.load(in_ptr5 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tmp19 + tmp22
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + x2, tmp26, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 0), primals_2, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 1, 1), (4, 1, 1, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 1), primals_4, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 1, 1), (4, 1, 1, 1))
buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 1), primals_2, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 1, 1), (4, 1, 1, 1))
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4, 4), (0, 64, 16, 4), 0), primals_4, stride=(1, 1), padding
=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0,
0), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 1, 1), (4, 1, 1, 1))
buf4 = empty_strided_cuda((4, 1, 1, 2), (2, 2, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(8)](buf0, primals_3, buf1, primals_5,
buf2, buf3, buf4, 8, XBLOCK=8, num_warps=1, num_stages=1)
del buf0
del buf1
del buf2
del buf3
del primals_3
del primals_5
return buf4, primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4,
4, 4), (256, 64, 16, 4), 0), reinterpret_tensor(primals_1, (1, 4, 4,
4), (256, 64, 16, 4), 1)
class ComplexConvNew(nn.Module):
def __init__(self, rank, in_channels, out_channels, kernel_size, stride
=1, padding=0, output_padding=0, dilation=1, groups=1, bias=True,
normalize_weight=False, epsilon=1e-07, conv_transposed=False):
super(ComplexConvNew, self).__init__()
self.rank = rank
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.conv_transposed = conv_transposed
self.conv_args = {'in_channels': self.in_channels, 'out_channels':
self.out_channels, 'kernel_size': self.kernel_size, 'stride':
self.stride, 'padding': self.padding, 'groups': self.groups,
'bias': self.bias}
if self.conv_transposed:
self.conv_args['output_padding'] = self.output_padding
else:
self.conv_args['dilation'] = self.dilation
self.conv_func = {(1): nn.Conv1d if not self.conv_transposed else
nn.ConvTranspose1d, (2): nn.Conv2d if not self.conv_transposed else
nn.ConvTranspose2d, (3): nn.Conv3d if not self.conv_transposed else
nn.ConvTranspose3d}[self.rank]
self.real_conv = self.conv_func(**self.conv_args)
self.imag_conv = self.conv_func(**self.conv_args)
def forward(self, input_0):
primals_1 = self.real_conv.weight
primals_3 = self.real_conv.bias
primals_2 = self.imag_conv.weight
primals_5 = self.imag_conv.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| wizofe/urus-mri-recon | ComplexConv | false | 4,539 | [
"MIT"
] | 0 | eab8e48dca31d2b936ce69ccc251ec5a4a10facc | https://github.com/wizofe/urus-mri-recon/tree/eab8e48dca31d2b936ce69ccc251ec5a4a10facc | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, rank, in_channels, out_channels, kernel_size, stride
=1, padding=0, output_padding=0, dilation=1, groups=1, bias=True,
normalize_weight=False, epsilon=1e-07, conv_transposed=False):
super().__init__()
self.rank = rank
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.conv_transposed = conv_transposed
self.conv_args = {'in_channels': self.in_channels, 'out_channels':
self.out_channels, 'kernel_size': self.kernel_size, 'stride':
self.stride, 'padding': self.padding, 'groups': self.groups,
'bias': self.bias}
if self.conv_transposed:
self.conv_args['output_padding'] = self.output_padding
else:
self.conv_args['dilation'] = self.dilation
self.conv_func = {(1): nn.Conv1d if not self.conv_transposed else
nn.ConvTranspose1d, (2): nn.Conv2d if not self.conv_transposed else
nn.ConvTranspose2d, (3): nn.Conv3d if not self.conv_transposed else
nn.ConvTranspose3d}[self.rank]
self.real_conv = self.conv_func(**self.conv_args)
self.imag_conv = self.conv_func(**self.conv_args)
def forward(self, input):
"""
assume complex input z = x + iy needs to be convolved by complex filter h = a + ib
where Output O = z * h, where * is convolution operator, then O = x*a + i(x*b)+ i(y*a) - y*b
so we need to calculate each of the 4 convolution operations in the previous equation,
one simple way to implement this as two conolution layers, one layer for the real weights (a)
and the other for imaginary weights (b), this can be done by concatenating both real and imaginary
parts of the input and convolve over both of them as follows:
c_r = [x; y] * a , and c_i= [x; y] * b, so that
O_real = c_r[real] - c_i[real], and O_imag = c_r[imag] - c_i[imag]
"""
ndims = input.ndimension()
input_real = input.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
input_imag = input.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
output_real = self.real_conv(input_real) - self.imag_conv(input_imag)
output_imag = self.real_conv(input_imag) + self.imag_conv(input_real)
output = torch.stack([output_real, output_imag], dim=ndims - 1)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'rank': 2, 'in_channels': 4, 'out_channels': 4,
'kernel_size': 4}]
|
TotalVariations | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/mw/cmw3ct3tmzx5hftcf6uanhehegjkcunx2jxce3wo4vzsg6pn6isf.py
# Topologically Sorted Source Nodes: [sub, abs_1, sum_1, sub_1, abs_2, sum_2, add], Original ATen: [aten.sub, aten.abs, aten.sum, aten.add]
# Source node to ATen node mapping:
# abs_1 => abs_1
# abs_2 => abs_2
# add => add
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_6), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_8, %slice_11), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_2,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_abs_add_sub_sum_0 = async_compile.triton('triton_per_fused_abs_add_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 12
r1 = (rindex // 12)
r2 = rindex % 48
r3 = (rindex // 48)
tmp0 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (r2 + (64*r3)), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (16 + r2 + (64*r3)), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, abs_1, sum_1, sub_1, abs_2, sum_2, add], Original ATen: [aten.sub, aten.abs, aten.sum, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_sub_sum_0.run(buf2, arg0_1, 1, 192, grid=grid(1), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn.modules.loss import _Loss
class TotalVariations(_Loss):
def forward(self, img1):
return torch.sum(torch.abs(img1[:, :, :-1] - img1[:, :, 1:])
) + torch.sum(torch.abs(img1[:, :-1, :] - img1[:, 1:, :]))
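# Typical usage as a smoothness regularizer next to a data-fidelity term
# (editorial sketch; `_tv_regularized_loss` and the weight value are
# hypothetical, not part of the original repo).
def _tv_regularized_loss(pred, target, weight=0.0001):
    fidelity = torch.nn.functional.l1_loss(pred, target)
    return fidelity + weight * TotalVariations()(pred)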
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn.modules.loss import _Loss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
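    # One persistent reduction over all 192 finite differences: r0/r1 index
    # the 4*4*12 horizontal pairs (stride-4 neighbours along dim 2), r2/r3
    # the 4*48 vertical pairs (stride-16 neighbours along dim 1); both
    # |diff| sums collapse into a single scalar.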
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 48
r3 = rindex // 48
tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (r2 + 64 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (16 + r2 + 64 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tl_math.abs(tmp10)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tmp7 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_sub_sum_0[grid(1)](buf2, arg0_1, 1, 192,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TotalVariationsNew(_Loss):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| wizofe/urus-mri-recon | TotalVariations | false | 4,540 | [
"MIT"
] | 0 | eab8e48dca31d2b936ce69ccc251ec5a4a10facc | https://github.com/wizofe/urus-mri-recon/tree/eab8e48dca31d2b936ce69ccc251ec5a4a10facc | import torch
from torch.nn.modules.loss import _Loss
class Model(_Loss):
def forward(self, img1):
return torch.sum(torch.abs(img1[:, :, :-1] - img1[:, :, 1:])
) + torch.sum(torch.abs(img1[:, :-1, :] - img1[:, 1:, :]))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CenteredL1Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/y4/cy4xfhdjskjemnrwhssetjm6tv72wjovz4gihrglwkv7zsd2pynj.py
# Topologically Sorted Source Nodes: [l1_loss], Original ATen: [aten.sub, aten.abs, aten.mean]
# Source node to ATen node mapping:
# l1_loss => abs_1, mean, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_4, %slice_8), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
triton_poi_fused_abs_mean_sub_0 = async_compile.triton('triton_poi_fused_abs_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {1: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=(1,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_mean_sub_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
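    # With margin m=4 on 4x4 spatial inputs the cropped slices are empty, so
    # the whole l1_loss constant-folds away; the 0.0 / 0.0 below reproduces
    # eager PyTorch's NaN for the mean over zero elements.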
tmp0 = 0.0
tmp1 = tmp0 / tmp0
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp1, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [l1_loss], Original ATen: [aten.sub, aten.abs, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_mean_sub_0.run(buf0, 1, grid=grid(1), stream=stream0)
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn.functional import l1_loss
class CenteredL1Loss(torch.nn.Module):
def __init__(self, margin):
super(CenteredL1Loss, self).__init__()
self.m = margin
def forward(self, true, preds):
return l1_loss(preds[:, :, self.m:-self.m, self.m:-self.m], true[:,
:, self.m:-self.m, self.m:-self.m])
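# Same crop-then-L1 with a guard for the degenerate case (editorial sketch;
# `_centered_l1` is a hypothetical helper, not part of the original repo).
# With margin m the spatial extents must exceed 2*m; the 4x4 inputs from
# get_inputs() below with m=4 leave an empty crop, so l1_loss averages zero
# elements and returns NaN.
def _centered_l1(true, preds, margin):
    assert true.shape[-1] > 2 * margin and true.shape[-2] > 2 * margin
    crop = (slice(None), slice(None), slice(margin, -margin), slice(margin, -margin))
    return l1_loss(preds[crop], true[crop])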
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_mean_sub_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
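    # Constant-folded NaN (0.0 / 0.0) for l1_loss over an empty crop; see
    # the margin note in CenteredL1Loss above.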
tmp0 = 0.0
tmp1 = tmp0 / tmp0
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp1, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_mean_sub_0[grid(1)](buf0, 1, XBLOCK=1,
num_warps=1, num_stages=1)
return buf0,
class CenteredL1LossNew(torch.nn.Module):
def __init__(self, margin):
super(CenteredL1LossNew, self).__init__()
self.m = margin
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| wsdea/EfficientSR | CenteredL1Loss | false | 4,541 | [
"MIT"
] | 0 | 077dea18c90e0d5bed722c609a776033c09f80e6 | https://github.com/wsdea/EfficientSR/tree/077dea18c90e0d5bed722c609a776033c09f80e6 | import torch
from torch.nn.functional import l1_loss
class Model(torch.nn.Module):
def __init__(self, margin):
super().__init__()
self.m = margin
def forward(self, true, preds):
return l1_loss(preds[:, :, self.m:-self.m, self.m:-self.m], true[:,
:, self.m:-self.m, self.m:-self.m])
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ZReLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/rf/crf5qjuyzipgldvln4flh6fivj2bpqhsg7fdnfcicchio5lef5y3.py
# Topologically Sorted Source Nodes: [phase, setitem], Original ATen: [aten.atan2, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# phase => atan2
# setitem => full_default, index_put
# Graph fragment:
# %atan2 : [num_users=2] = call_function[target=torch.ops.aten.atan2.default](args = (%squeeze_1, %squeeze), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%atan2, [%ne], %full_default), kwargs = {})
triton_poi_fused_atan2_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_atan2_index_put_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_atan2_index_put_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_atan2_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
tmp2 = libdevice.atan2(tmp0, tmp1)
tmp3 = tmp2 != tmp2
tmp4 = 0.0
tmp5 = tl.where(tmp3, tmp4, tmp2)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dt/cdt7dp6a4tuswlwlcr3mpluacla2nb2gzowjvku4qsydztxigo7w.py
# Topologically Sorted Source Nodes: [phase_2, le, ge, tensor, output, tensor_1, output_1], Original ATen: [aten.cat, aten.le, aten.ge, aten.lift_fresh, aten.where]
# Source node to ATen node mapping:
# ge => ge
# le => le
# output => where
# output_1 => where_1
# phase_2 => clone
# tensor => full_default_1
# tensor_1 => full_default_2
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%clone, 1.5707963267948966), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%clone, 0.0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ge, %arg0_1, %full_default_1), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%le, %where, %full_default_2), kwargs = {})
triton_poi_fused_cat_ge_le_lift_fresh_where_1 = async_compile.triton('triton_poi_fused_cat_ge_le_lift_fresh_where_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_ge_le_lift_fresh_where_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_ge_le_lift_fresh_where_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x2), xmask)
tmp1 = 1.5707963267948966
tmp2 = tmp0 <= tmp1
tmp3 = 0.0
tmp4 = tmp0 >= tmp3
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.where(tmp2, tmp6, tmp3)
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 2), (32, 8, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [phase, setitem], Original ATen: [aten.atan2, aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused_atan2_index_put_lift_fresh_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [phase_2, le, ge, tensor, output, tensor_1, output_1], Original ATen: [aten.cat, aten.le, aten.ge, aten.lift_fresh, aten.where]
triton_poi_fused_cat_ge_le_lift_fresh_where_1.run(buf0, arg0_1, buf1, 128, grid=grid(128), stream=stream0)
del arg0_1
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 2), (32, 8, 2, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
def cylindricalToPolarConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both real and imaginary channels
in the last dims"""
ndims = input1.ndimension()
real_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
imag_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
mag = (real_input ** 2 + imag_input ** 2) ** 0.5
phase = torch.atan2(imag_input, real_input)
phase[phase.ne(phase)] = 0.0
return torch.stack((mag, phase), dim=input1.ndimension() - 1)
else:
"""input1 is real part and input2 is imaginary part; both of size [B,C,H,W,D]"""
mag = (input1 ** 2 + input2 ** 2) ** 0.5
phase = torch.atan2(input2, input1)
phase[phase.ne(phase)] = 0.0
return mag, phase
class ZReLU(nn.Module):
def __init__(self, polar=False):
super(ZReLU, self).__init__()
self.polar = polar
def forward(self, input):
ndims = input.ndimension()
input_real = input.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
input_imag = input.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
if not self.polar:
_mag, phase = cylindricalToPolarConversion(input_real, input_imag)
else:
phase = input_imag
phase = phase.unsqueeze(-1)
phase = torch.cat([phase, phase], ndims - 1)
output = torch.where(phase >= 0.0, input, torch.tensor(0.0))
output = torch.where(phase <= np.pi / 2, output, torch.tensor(0.0))
return output
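# An equivalent first-quadrant mask for finite inputs (editorial sketch;
# `_zrelu_reference` is a hypothetical helper, not part of the original
# repo): phase in [0, pi/2] is exactly real >= 0 and imag >= 0.
def _zrelu_reference(z):
    real, imag = z[..., 0], z[..., 1]
    keep = ((real >= 0) & (imag >= 0)).unsqueeze(-1)
    return torch.where(keep, z, torch.zeros_like(z))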
def get_inputs():
return [torch.rand([4, 4, 4, 2])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_atan2_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
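    # Computes phase = atan2(imag, real) for each interleaved complex pair;
    # the self-inequality test (tmp2 != tmp2) detects NaN and resets it to
    # 0.0, matching `phase[phase.ne(phase)] = 0.0` in the module code.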
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tmp2 = libdevice.atan2(tmp0, tmp1)
tmp3 = tmp2 != tmp2
tmp4 = 0.0
tmp5 = tl.where(tmp3, tmp4, tmp2)
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_cat_ge_le_lift_fresh_where_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
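    # Broadcasts each phase over its real/imag pair (x1 = x2 // 2) and
    # zeroes entries whose phase falls outside [0, pi/2], fusing ZReLU's two
    # torch.where calls into one select chain.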
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x2, xmask)
tmp1 = 1.5707963267948966
tmp2 = tmp0 <= tmp1
tmp3 = 0.0
tmp4 = tmp0 >= tmp3
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.where(tmp2, tmp6, tmp3)
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 2), (32, 8, 2, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_atan2_index_put_lift_fresh_0[grid(64)](arg0_1,
buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused_cat_ge_le_lift_fresh_where_1[grid(128)](buf0,
arg0_1, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf0
return buf1,
def cylindricalToPolarConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both real and imaginary channels
in the last dims"""
ndims = input1.ndimension()
real_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
imag_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
mag = (real_input ** 2 + imag_input ** 2) ** 0.5
phase = torch.atan2(imag_input, real_input)
phase[phase.ne(phase)] = 0.0
return torch.stack((mag, phase), dim=input1.ndimension() - 1)
else:
"""input1 is real part and input2 is imaginary part; both of size [B,C,H,W,D]"""
mag = (input1 ** 2 + input2 ** 2) ** 0.5
phase = torch.atan2(input2, input1)
phase[phase.ne(phase)] = 0.0
return mag, phase
class ZReLUNew(nn.Module):
def __init__(self, polar=False):
super(ZReLUNew, self).__init__()
self.polar = polar
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| wizofe/urus-mri-recon | ZReLU | false | 4,542 | [
"MIT"
] | 0 | eab8e48dca31d2b936ce69ccc251ec5a4a10facc | https://github.com/wizofe/urus-mri-recon/tree/eab8e48dca31d2b936ce69ccc251ec5a4a10facc | import torch
import numpy as np
import torch.nn as nn
def cylindricalToPolarConversion(input1, input2=None):
if input2 is None:
"""input1 is tensor of [B,C,H,W,D,2] contains both real and imaginary channels
in the last dims"""
ndims = input1.ndimension()
real_input = input1.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
imag_input = input1.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
mag = (real_input ** 2 + imag_input ** 2) ** 0.5
phase = torch.atan2(imag_input, real_input)
phase[phase.ne(phase)] = 0.0
return torch.stack((mag, phase), dim=input1.ndimension() - 1)
else:
"""input1 is real part and input2 is imaginary part; both of size [B,C,H,W,D]"""
mag = (input1 ** 2 + input2 ** 2) ** 0.5
phase = torch.atan2(input2, input1)
phase[phase.ne(phase)] = 0.0
return mag, phase
class Model(nn.Module):
def __init__(self, polar=False):
super().__init__()
self.polar = polar
def forward(self, input):
ndims = input.ndimension()
input_real = input.narrow(ndims - 1, 0, 1).squeeze(ndims - 1)
input_imag = input.narrow(ndims - 1, 1, 1).squeeze(ndims - 1)
if not self.polar:
_mag, phase = cylindricalToPolarConversion(input_real, input_imag)
else:
phase = input_imag
phase = phase.unsqueeze(-1)
phase = torch.cat([phase, phase], ndims - 1)
output = torch.where(phase >= 0.0, input, torch.tensor(0.0))
output = torch.where(phase <= np.pi / 2, output, torch.tensor(0.0))
return output
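# Small worked example (an addition, assuming CPU tensors): zReLU keeps an entry
# only when its phase lies in [0, pi/2], i.e. when the real and imaginary parts
# are both non-negative. atan2(0.3, 0.5) ~ 0.54 is kept; atan2(0.3, -0.5) ~ 2.60
# and atan2(-0.3, 0.5) ~ -0.54 are zeroed.
def _demo_zrelu_gate():
    z = torch.tensor([[[[0.5, 0.3], [-0.5, 0.3], [0.5, -0.3]]]])  # (1, 1, 3, 2)
    out = Model()(z)
    assert torch.equal(out[0, 0, 0], z[0, 0, 0])  # first quadrant: kept
    assert torch.count_nonzero(out[0, 0, 1:]) == 0  # others: zeroed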
def get_inputs():
return [torch.rand([4, 4, 4, 2])]
def get_init_inputs():
return []
|
ModReLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ez/cezo3qeqowipxahtpm7rhn2jnvy6no5gdlebcssicgia3nr4zaon.py
# Topologically Sorted Source Nodes: [mag_1, add_2, lt, tensor, output], Original ATen: [aten.cat, aten.add, aten.lt, aten.lift_fresh, aten.where]
# Source node to ATen node mapping:
# add_2 => add_2
# lt => lt
# mag_1 => clone
# output => where
# tensor => full_default
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%clone, %expand_1), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%add_2, 0.3), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %full_default, %arg0_1), kwargs = {})
triton_poi_fused_add_cat_lift_fresh_lt_where_0 = async_compile.triton('triton_poi_fused_add_cat_lift_fresh_lt_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_lift_fresh_lt_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_lift_fresh_lt_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 2)
x2 = (xindex // 8) % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2*x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x4)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (x5), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp5 = libdevice.sqrt(tmp4)
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = 0.3
tmp11 = tmp9 < tmp10
tmp13 = 0.0
tmp14 = tl.where(tmp11, tmp13, tmp12)
tl.store(out_ptr0 + (x5), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 2), (32, 8, 2, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [mag_1, add_2, lt, tensor, output], Original ATen: [aten.cat, aten.add, aten.lt, aten.lift_fresh, aten.where]
stream0 = get_raw_stream(0)
triton_poi_fused_add_cat_lift_fresh_lt_where_0.run(arg0_1, arg1_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 2), (32, 8, 2, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
def magnitude(input):
if input.ndimension() == 4:
return (input[:, :, :, 0] ** 2 + input[:, :, :, 1] ** 2) ** 0.5
elif input.ndimension() == 5:
return (input[:, :, :, :, 0] ** 2 + input[:, :, :, :, 1] ** 2) ** 0.5
elif input.ndimension() == 6:
return (input[:, :, :, :, :, 0] ** 2 + input[:, :, :, :, :, 1] ** 2
) ** 0.5
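# Hedged equivalent (an addition, hypothetical helper): for all three supported
# layouts the trailing dim is the (real, imag) pair, so every branch above
# reduces to an L2 norm over the last dimension.
def magnitude_norm(input):
    return (input ** 2).sum(dim=-1) ** 0.5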
class ModReLU(nn.Module):
def __init__(self, in_channels, inplace=True):
super(ModReLU, self).__init__()
self.inplace = inplace
self.in_channels = in_channels
self.b = Parameter(torch.Tensor(in_channels), requires_grad=True)
self.reset_parameters()
self.relu = nn.ReLU(self.inplace)
def reset_parameters(self):
self.b.data.uniform_(-0.01, 0.0)
def forward(self, input):
eps = 1e-05
ndims = input.ndimension()
mag = magnitude(input).unsqueeze(-1) + eps
mag = torch.cat([mag, mag], ndims - 1)
if ndims == 4:
brdcst_b = self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(
mag)
elif ndims == 5:
brdcst_b = self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(
4).expand_as(mag)
elif ndims == 6:
brdcst_b = self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(
4).unsqueeze(5).expand_as(mag)
output = torch.where(mag + brdcst_b < 0.3, torch.tensor(0.0), input)
return output
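# Hedged numerical sketch (an addition, not from the source repo): ModReLU zeroes
# an entry when the complex magnitude plus the per-channel bias falls below the
# hard-coded 0.3 threshold.
def _demo_modrelu_threshold():
    m = ModReLU(in_channels=1)
    m.b.data.fill_(0.0)  # neutral bias, so the gate is just |z| + eps < 0.3
    z = torch.tensor([[[[0.3, 0.4], [0.1, 0.1]]]])  # magnitudes 0.5 and ~0.141
    out = m(z)
    assert torch.equal(out[0, 0, 0], z[0, 0, 0])  # 0.5 >= 0.3: kept
    assert torch.count_nonzero(out[0, 0, 1]) == 0  # 0.141 < 0.3: zeroed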
def get_inputs():
return [torch.rand([4, 4, 4, 2])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_cat_lift_fresh_lt_where_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 2
x2 = xindex // 8 % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x4), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + x5, xmask)
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp5 = libdevice.sqrt(tmp4)  # magnitude sqrt(re^2 + im^2)
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6  # + eps
    tmp9 = tmp7 + tmp8  # + per-channel bias b
    tmp10 = 0.3
    tmp11 = tmp9 < tmp10  # gate: magnitude + bias below threshold?
    tmp13 = 0.0
    tmp14 = tl.where(tmp11, tmp13, tmp12)  # zero out gated entries
tl.store(out_ptr0 + x5, tmp14, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 2), (32, 8, 2, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_cat_lift_fresh_lt_where_0[grid(128)](arg0_1,
arg1_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
def magnitude(input):
if input.ndimension() == 4:
return (input[:, :, :, 0] ** 2 + input[:, :, :, 1] ** 2) ** 0.5
elif input.ndimension() == 5:
return (input[:, :, :, :, 0] ** 2 + input[:, :, :, :, 1] ** 2) ** 0.5
elif input.ndimension() == 6:
return (input[:, :, :, :, :, 0] ** 2 + input[:, :, :, :, :, 1] ** 2
) ** 0.5
class ModReLUNew(nn.Module):
def __init__(self, in_channels, inplace=True):
super(ModReLUNew, self).__init__()
self.inplace = inplace
self.in_channels = in_channels
self.b = Parameter(torch.Tensor(in_channels), requires_grad=True)
self.reset_parameters()
self.relu = nn.ReLU(self.inplace)
def reset_parameters(self):
self.b.data.uniform_(-0.01, 0.0)
def forward(self, input_0):
arg1_1 = self.b
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
| wizofe/urus-mri-recon | ModReLU | false | 4,543 | [
"MIT"
] | 0 | eab8e48dca31d2b936ce69ccc251ec5a4a10facc | https://github.com/wizofe/urus-mri-recon/tree/eab8e48dca31d2b936ce69ccc251ec5a4a10facc | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
def magnitude(input):
if input.ndimension() == 4:
return (input[:, :, :, 0] ** 2 + input[:, :, :, 1] ** 2) ** 0.5
elif input.ndimension() == 5:
return (input[:, :, :, :, 0] ** 2 + input[:, :, :, :, 1] ** 2) ** 0.5
elif input.ndimension() == 6:
return (input[:, :, :, :, :, 0] ** 2 + input[:, :, :, :, :, 1] ** 2
) ** 0.5
class Model(nn.Module):
def __init__(self, in_channels, inplace=True):
super().__init__()
self.inplace = inplace
self.in_channels = in_channels
self.b = Parameter(torch.Tensor(in_channels), requires_grad=True)
self.reset_parameters()
self.relu = nn.ReLU(self.inplace)
def reset_parameters(self):
self.b.data.uniform_(-0.01, 0.0)
def forward(self, input):
eps = 1e-05
ndims = input.ndimension()
mag = magnitude(input).unsqueeze(-1) + eps
mag = torch.cat([mag, mag], ndims - 1)
if ndims == 4:
brdcst_b = self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(
mag)
elif ndims == 5:
brdcst_b = self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(
4).expand_as(mag)
elif ndims == 6:
brdcst_b = self.b.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(
4).unsqueeze(5).expand_as(mag)
output = torch.where(mag + brdcst_b < 0.3, torch.tensor(0.0), input)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 2])]
def get_init_inputs():
return [4]
|
PointLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/rs/crsg7dlacsfgnuurqkc7zcxhea7d6tgzbng6ovijxiosh7gpwagi.py
# Topologically Sorted Source Nodes: [min_1, min_2, min_3, min_4, min_5, min_6, min_7, min_8, distances_4, mul_2, distances_9, mul_3, add, truediv, dist, distances_14, mul_6, distances_19, mul_7, add_2, truediv_1, dist_1, distances_24, mul_10, distances_29, mul_11, add_4, truediv_2, dist_2, distances_34, mul_14, distances_39, mul_15, add_6, truediv_3, dist_3, mul_16], Original ATen: [aten.min, aten.mean, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# add_2 => add_2
# add_4 => add_4
# add_6 => add_6
# dist => add_1
# dist_1 => add_3
# dist_2 => add_5
# dist_3 => add_7
# distances_14 => mean_2
# distances_19 => mean_3
# distances_24 => mean_4
# distances_29 => mean_5
# distances_34 => mean_6
# distances_39 => mean_7
# distances_4 => mean
# distances_9 => mean_1
# min_1 => min_1
# min_2 => min_2
# min_3 => min_3
# min_4 => min_4
# min_5 => min_5
# min_6 => min_6
# min_7 => min_7
# min_8 => min_8
# mul_10 => mul_10
# mul_11 => mul_11
# mul_14 => mul_14
# mul_15 => mul_15
# mul_16 => mul_16
# mul_2 => mul_2
# mul_3 => mul_3
# mul_6 => mul_6
# mul_7 => mul_7
# truediv => div
# truediv_1 => div_1
# truediv_2 => div_2
# truediv_3 => div_3
# Graph fragment:
# %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_1, 1), kwargs = {})
# %min_2 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_3, 1), kwargs = {})
# %min_3 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_5, 1), kwargs = {})
# %min_4 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_7, 1), kwargs = {})
# %min_5 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_9, 1), kwargs = {})
# %min_6 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_11, 1), kwargs = {})
# %min_7 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_13, 1), kwargs = {})
# %min_8 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view_15, 1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%getitem,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%getitem_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 0), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%getitem_4,), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_2, 0.5), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%getitem_6,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_3, 0.5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %mul_7), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 4), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %div_1), kwargs = {})
# %mean_4 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%getitem_8,), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_4, 0.5), kwargs = {})
# %mean_5 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%getitem_10,), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_5, 0.5), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, %mul_11), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_4, 4), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %div_2), kwargs = {})
# %mean_6 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%getitem_12,), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_6, 0.5), kwargs = {})
# %mean_7 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%getitem_14,), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_7, 0.5), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_14, %mul_15), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_6, 4), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %div_3), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 100), kwargs = {})
triton_per_fused_add_div_mean_min_mul_0 = async_compile.triton('triton_per_fused_add_div_mean_min_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_min_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 160, 'num_reduction': 8, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_min_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*((4*r0) % 4)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (4))
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp23 = tl.load(in_ptr0 + (5))
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp28 = tl.load(in_ptr0 + (6))
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp33 = tl.load(in_ptr0 + (7))
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp39 = tl.load(in_ptr0 + (8))
tmp40 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK])
tmp43 = tl.load(in_ptr0 + (9))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp48 = tl.load(in_ptr0 + (10))
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp53 = tl.load(in_ptr0 + (11))
tmp54 = tl.broadcast_to(tmp53, [XBLOCK, RBLOCK])
tmp59 = tl.load(in_ptr0 + (12))
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tmp63 = tl.load(in_ptr0 + (13))
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp68 = tl.load(in_ptr0 + (14))
tmp69 = tl.broadcast_to(tmp68, [XBLOCK, RBLOCK])
tmp73 = tl.load(in_ptr0 + (15))
tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK])
tmp82 = tl.load(in_ptr1 + (4*((4*r0) % 4)), None, eviction_policy='evict_last')
tmp83 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp86 = tl.load(in_ptr1 + (1 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp87 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp91 = tl.load(in_ptr1 + (2 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp92 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp96 = tl.load(in_ptr1 + (3 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp97 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp101 = tl.load(in_ptr1 + (4))
tmp102 = tl.broadcast_to(tmp101, [XBLOCK, RBLOCK])
tmp105 = tl.load(in_ptr1 + (5))
tmp106 = tl.broadcast_to(tmp105, [XBLOCK, RBLOCK])
tmp110 = tl.load(in_ptr1 + (6))
tmp111 = tl.broadcast_to(tmp110, [XBLOCK, RBLOCK])
tmp115 = tl.load(in_ptr1 + (7))
tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK])
tmp121 = tl.load(in_ptr1 + (8))
tmp122 = tl.broadcast_to(tmp121, [XBLOCK, RBLOCK])
tmp125 = tl.load(in_ptr1 + (9))
tmp126 = tl.broadcast_to(tmp125, [XBLOCK, RBLOCK])
tmp130 = tl.load(in_ptr1 + (10))
tmp131 = tl.broadcast_to(tmp130, [XBLOCK, RBLOCK])
tmp135 = tl.load(in_ptr1 + (11))
tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
tmp141 = tl.load(in_ptr1 + (12))
tmp142 = tl.broadcast_to(tmp141, [XBLOCK, RBLOCK])
tmp145 = tl.load(in_ptr1 + (13))
tmp146 = tl.broadcast_to(tmp145, [XBLOCK, RBLOCK])
tmp150 = tl.load(in_ptr1 + (14))
tmp151 = tl.broadcast_to(tmp150, [XBLOCK, RBLOCK])
tmp155 = tl.load(in_ptr1 + (15))
tmp156 = tl.broadcast_to(tmp155, [XBLOCK, RBLOCK])
tmp164 = tl.load(in_ptr0 + (16 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp165 = tl.load(in_ptr1 + (16 + (4*r0)), None, eviction_policy='evict_last')
tmp168 = tl.load(in_ptr0 + (17 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp169 = tl.load(in_ptr1 + (17 + (4*r0)), None, eviction_policy='evict_last')
tmp173 = tl.load(in_ptr0 + (18 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp174 = tl.load(in_ptr1 + (18 + (4*r0)), None, eviction_policy='evict_last')
tmp178 = tl.load(in_ptr0 + (19 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp179 = tl.load(in_ptr1 + (19 + (4*r0)), None, eviction_policy='evict_last')
tmp183 = tl.load(in_ptr0 + (20))
tmp184 = tl.broadcast_to(tmp183, [XBLOCK, RBLOCK])
tmp187 = tl.load(in_ptr0 + (21))
tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK])
tmp192 = tl.load(in_ptr0 + (22))
tmp193 = tl.broadcast_to(tmp192, [XBLOCK, RBLOCK])
tmp197 = tl.load(in_ptr0 + (23))
tmp198 = tl.broadcast_to(tmp197, [XBLOCK, RBLOCK])
tmp203 = tl.load(in_ptr0 + (24))
tmp204 = tl.broadcast_to(tmp203, [XBLOCK, RBLOCK])
tmp207 = tl.load(in_ptr0 + (25))
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp212 = tl.load(in_ptr0 + (26))
tmp213 = tl.broadcast_to(tmp212, [XBLOCK, RBLOCK])
tmp217 = tl.load(in_ptr0 + (27))
tmp218 = tl.broadcast_to(tmp217, [XBLOCK, RBLOCK])
tmp223 = tl.load(in_ptr0 + (28))
tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK])
tmp227 = tl.load(in_ptr0 + (29))
tmp228 = tl.broadcast_to(tmp227, [XBLOCK, RBLOCK])
tmp232 = tl.load(in_ptr0 + (30))
tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK])
tmp237 = tl.load(in_ptr0 + (31))
tmp238 = tl.broadcast_to(tmp237, [XBLOCK, RBLOCK])
tmp246 = tl.load(in_ptr1 + (16 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp247 = tl.load(in_ptr0 + (16 + (4*r0)), None, eviction_policy='evict_last')
tmp250 = tl.load(in_ptr1 + (17 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp251 = tl.load(in_ptr0 + (17 + (4*r0)), None, eviction_policy='evict_last')
tmp255 = tl.load(in_ptr1 + (18 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp256 = tl.load(in_ptr0 + (18 + (4*r0)), None, eviction_policy='evict_last')
tmp260 = tl.load(in_ptr1 + (19 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp261 = tl.load(in_ptr0 + (19 + (4*r0)), None, eviction_policy='evict_last')
tmp265 = tl.load(in_ptr1 + (20))
tmp266 = tl.broadcast_to(tmp265, [XBLOCK, RBLOCK])
tmp269 = tl.load(in_ptr1 + (21))
tmp270 = tl.broadcast_to(tmp269, [XBLOCK, RBLOCK])
tmp274 = tl.load(in_ptr1 + (22))
tmp275 = tl.broadcast_to(tmp274, [XBLOCK, RBLOCK])
tmp279 = tl.load(in_ptr1 + (23))
tmp280 = tl.broadcast_to(tmp279, [XBLOCK, RBLOCK])
tmp285 = tl.load(in_ptr1 + (24))
tmp286 = tl.broadcast_to(tmp285, [XBLOCK, RBLOCK])
tmp289 = tl.load(in_ptr1 + (25))
tmp290 = tl.broadcast_to(tmp289, [XBLOCK, RBLOCK])
tmp294 = tl.load(in_ptr1 + (26))
tmp295 = tl.broadcast_to(tmp294, [XBLOCK, RBLOCK])
tmp299 = tl.load(in_ptr1 + (27))
tmp300 = tl.broadcast_to(tmp299, [XBLOCK, RBLOCK])
tmp305 = tl.load(in_ptr1 + (28))
tmp306 = tl.broadcast_to(tmp305, [XBLOCK, RBLOCK])
tmp309 = tl.load(in_ptr1 + (29))
tmp310 = tl.broadcast_to(tmp309, [XBLOCK, RBLOCK])
tmp314 = tl.load(in_ptr1 + (30))
tmp315 = tl.broadcast_to(tmp314, [XBLOCK, RBLOCK])
tmp319 = tl.load(in_ptr1 + (31))
tmp320 = tl.broadcast_to(tmp319, [XBLOCK, RBLOCK])
tmp328 = tl.load(in_ptr0 + (48 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp329 = tl.load(in_ptr1 + (48 + (4*r0)), None, eviction_policy='evict_last')
tmp332 = tl.load(in_ptr0 + (49 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp333 = tl.load(in_ptr1 + (49 + (4*r0)), None, eviction_policy='evict_last')
tmp337 = tl.load(in_ptr0 + (50 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp338 = tl.load(in_ptr1 + (50 + (4*r0)), None, eviction_policy='evict_last')
tmp342 = tl.load(in_ptr0 + (51 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp343 = tl.load(in_ptr1 + (51 + (4*r0)), None, eviction_policy='evict_last')
tmp347 = tl.load(in_ptr0 + (52))
tmp348 = tl.broadcast_to(tmp347, [XBLOCK, RBLOCK])
tmp351 = tl.load(in_ptr0 + (53))
tmp352 = tl.broadcast_to(tmp351, [XBLOCK, RBLOCK])
tmp356 = tl.load(in_ptr0 + (54))
tmp357 = tl.broadcast_to(tmp356, [XBLOCK, RBLOCK])
tmp361 = tl.load(in_ptr0 + (55))
tmp362 = tl.broadcast_to(tmp361, [XBLOCK, RBLOCK])
tmp367 = tl.load(in_ptr0 + (56))
tmp368 = tl.broadcast_to(tmp367, [XBLOCK, RBLOCK])
tmp371 = tl.load(in_ptr0 + (57))
tmp372 = tl.broadcast_to(tmp371, [XBLOCK, RBLOCK])
tmp376 = tl.load(in_ptr0 + (58))
tmp377 = tl.broadcast_to(tmp376, [XBLOCK, RBLOCK])
tmp381 = tl.load(in_ptr0 + (59))
tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK])
tmp387 = tl.load(in_ptr0 + (60))
tmp388 = tl.broadcast_to(tmp387, [XBLOCK, RBLOCK])
tmp391 = tl.load(in_ptr0 + (61))
tmp392 = tl.broadcast_to(tmp391, [XBLOCK, RBLOCK])
tmp396 = tl.load(in_ptr0 + (62))
tmp397 = tl.broadcast_to(tmp396, [XBLOCK, RBLOCK])
tmp401 = tl.load(in_ptr0 + (63))
tmp402 = tl.broadcast_to(tmp401, [XBLOCK, RBLOCK])
tmp410 = tl.load(in_ptr0 + (32 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp411 = tl.load(in_ptr1 + (32 + (4*r0)), None, eviction_policy='evict_last')
tmp414 = tl.load(in_ptr0 + (33 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp415 = tl.load(in_ptr1 + (33 + (4*r0)), None, eviction_policy='evict_last')
tmp419 = tl.load(in_ptr0 + (34 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp420 = tl.load(in_ptr1 + (34 + (4*r0)), None, eviction_policy='evict_last')
tmp424 = tl.load(in_ptr0 + (35 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp425 = tl.load(in_ptr1 + (35 + (4*r0)), None, eviction_policy='evict_last')
tmp429 = tl.load(in_ptr0 + (36))
tmp430 = tl.broadcast_to(tmp429, [XBLOCK, RBLOCK])
tmp433 = tl.load(in_ptr0 + (37))
tmp434 = tl.broadcast_to(tmp433, [XBLOCK, RBLOCK])
tmp438 = tl.load(in_ptr0 + (38))
tmp439 = tl.broadcast_to(tmp438, [XBLOCK, RBLOCK])
tmp443 = tl.load(in_ptr0 + (39))
tmp444 = tl.broadcast_to(tmp443, [XBLOCK, RBLOCK])
tmp449 = tl.load(in_ptr0 + (40))
tmp450 = tl.broadcast_to(tmp449, [XBLOCK, RBLOCK])
tmp453 = tl.load(in_ptr0 + (41))
tmp454 = tl.broadcast_to(tmp453, [XBLOCK, RBLOCK])
tmp458 = tl.load(in_ptr0 + (42))
tmp459 = tl.broadcast_to(tmp458, [XBLOCK, RBLOCK])
tmp463 = tl.load(in_ptr0 + (43))
tmp464 = tl.broadcast_to(tmp463, [XBLOCK, RBLOCK])
tmp469 = tl.load(in_ptr0 + (44))
tmp470 = tl.broadcast_to(tmp469, [XBLOCK, RBLOCK])
tmp473 = tl.load(in_ptr0 + (45))
tmp474 = tl.broadcast_to(tmp473, [XBLOCK, RBLOCK])
tmp478 = tl.load(in_ptr0 + (46))
tmp479 = tl.broadcast_to(tmp478, [XBLOCK, RBLOCK])
tmp483 = tl.load(in_ptr0 + (47))
tmp484 = tl.broadcast_to(tmp483, [XBLOCK, RBLOCK])
tmp492 = tl.load(in_ptr1 + (48 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp493 = tl.load(in_ptr0 + (48 + (4*r0)), None, eviction_policy='evict_last')
tmp496 = tl.load(in_ptr1 + (49 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp497 = tl.load(in_ptr0 + (49 + (4*r0)), None, eviction_policy='evict_last')
tmp501 = tl.load(in_ptr1 + (50 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp502 = tl.load(in_ptr0 + (50 + (4*r0)), None, eviction_policy='evict_last')
tmp506 = tl.load(in_ptr1 + (51 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp507 = tl.load(in_ptr0 + (51 + (4*r0)), None, eviction_policy='evict_last')
tmp511 = tl.load(in_ptr1 + (52))
tmp512 = tl.broadcast_to(tmp511, [XBLOCK, RBLOCK])
tmp515 = tl.load(in_ptr1 + (53))
tmp516 = tl.broadcast_to(tmp515, [XBLOCK, RBLOCK])
tmp520 = tl.load(in_ptr1 + (54))
tmp521 = tl.broadcast_to(tmp520, [XBLOCK, RBLOCK])
tmp525 = tl.load(in_ptr1 + (55))
tmp526 = tl.broadcast_to(tmp525, [XBLOCK, RBLOCK])
tmp531 = tl.load(in_ptr1 + (56))
tmp532 = tl.broadcast_to(tmp531, [XBLOCK, RBLOCK])
tmp535 = tl.load(in_ptr1 + (57))
tmp536 = tl.broadcast_to(tmp535, [XBLOCK, RBLOCK])
tmp540 = tl.load(in_ptr1 + (58))
tmp541 = tl.broadcast_to(tmp540, [XBLOCK, RBLOCK])
tmp545 = tl.load(in_ptr1 + (59))
tmp546 = tl.broadcast_to(tmp545, [XBLOCK, RBLOCK])
tmp551 = tl.load(in_ptr1 + (60))
tmp552 = tl.broadcast_to(tmp551, [XBLOCK, RBLOCK])
tmp555 = tl.load(in_ptr1 + (61))
tmp556 = tl.broadcast_to(tmp555, [XBLOCK, RBLOCK])
tmp560 = tl.load(in_ptr1 + (62))
tmp561 = tl.broadcast_to(tmp560, [XBLOCK, RBLOCK])
tmp565 = tl.load(in_ptr1 + (63))
tmp566 = tl.broadcast_to(tmp565, [XBLOCK, RBLOCK])
tmp574 = tl.load(in_ptr1 + (32 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp575 = tl.load(in_ptr0 + (32 + (4*r0)), None, eviction_policy='evict_last')
tmp578 = tl.load(in_ptr1 + (33 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp579 = tl.load(in_ptr0 + (33 + (4*r0)), None, eviction_policy='evict_last')
tmp583 = tl.load(in_ptr1 + (34 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp584 = tl.load(in_ptr0 + (34 + (4*r0)), None, eviction_policy='evict_last')
tmp588 = tl.load(in_ptr1 + (35 + (4*((4*r0) % 4))), None, eviction_policy='evict_last')
tmp589 = tl.load(in_ptr0 + (35 + (4*r0)), None, eviction_policy='evict_last')
tmp593 = tl.load(in_ptr1 + (36))
tmp594 = tl.broadcast_to(tmp593, [XBLOCK, RBLOCK])
tmp597 = tl.load(in_ptr1 + (37))
tmp598 = tl.broadcast_to(tmp597, [XBLOCK, RBLOCK])
tmp602 = tl.load(in_ptr1 + (38))
tmp603 = tl.broadcast_to(tmp602, [XBLOCK, RBLOCK])
tmp607 = tl.load(in_ptr1 + (39))
tmp608 = tl.broadcast_to(tmp607, [XBLOCK, RBLOCK])
tmp613 = tl.load(in_ptr1 + (40))
tmp614 = tl.broadcast_to(tmp613, [XBLOCK, RBLOCK])
tmp617 = tl.load(in_ptr1 + (41))
tmp618 = tl.broadcast_to(tmp617, [XBLOCK, RBLOCK])
tmp622 = tl.load(in_ptr1 + (42))
tmp623 = tl.broadcast_to(tmp622, [XBLOCK, RBLOCK])
tmp627 = tl.load(in_ptr1 + (43))
tmp628 = tl.broadcast_to(tmp627, [XBLOCK, RBLOCK])
tmp633 = tl.load(in_ptr1 + (44))
tmp634 = tl.broadcast_to(tmp633, [XBLOCK, RBLOCK])
tmp637 = tl.load(in_ptr1 + (45))
tmp638 = tl.broadcast_to(tmp637, [XBLOCK, RBLOCK])
tmp642 = tl.load(in_ptr1 + (46))
tmp643 = tl.broadcast_to(tmp642, [XBLOCK, RBLOCK])
tmp647 = tl.load(in_ptr1 + (47))
tmp648 = tl.broadcast_to(tmp647, [XBLOCK, RBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp21 = tmp20 - tmp1
tmp22 = tmp21 * tmp21
tmp25 = tmp24 - tmp5
tmp26 = tmp25 * tmp25
tmp27 = tmp22 + tmp26
tmp30 = tmp29 - tmp10
tmp31 = tmp30 * tmp30
tmp32 = tmp27 + tmp31
tmp35 = tmp34 - tmp15
tmp36 = tmp35 * tmp35
tmp37 = tmp32 + tmp36
tmp38 = triton_helpers.minimum(tmp18, tmp37)
tmp41 = tmp40 - tmp1
tmp42 = tmp41 * tmp41
tmp45 = tmp44 - tmp5
tmp46 = tmp45 * tmp45
tmp47 = tmp42 + tmp46
tmp50 = tmp49 - tmp10
tmp51 = tmp50 * tmp50
tmp52 = tmp47 + tmp51
tmp55 = tmp54 - tmp15
tmp56 = tmp55 * tmp55
tmp57 = tmp52 + tmp56
tmp58 = triton_helpers.minimum(tmp38, tmp57)
tmp61 = tmp60 - tmp1
tmp62 = tmp61 * tmp61
tmp65 = tmp64 - tmp5
tmp66 = tmp65 * tmp65
tmp67 = tmp62 + tmp66
tmp70 = tmp69 - tmp10
tmp71 = tmp70 * tmp70
tmp72 = tmp67 + tmp71
tmp75 = tmp74 - tmp15
tmp76 = tmp75 * tmp75
tmp77 = tmp72 + tmp76
tmp78 = triton_helpers.minimum(tmp58, tmp77)
tmp79 = tl.broadcast_to(tmp78, [XBLOCK, RBLOCK])
tmp81 = tl.sum(tmp79, 1)[:, None]
tmp84 = tmp82 - tmp83
tmp85 = tmp84 * tmp84
tmp88 = tmp86 - tmp87
tmp89 = tmp88 * tmp88
tmp90 = tmp85 + tmp89
tmp93 = tmp91 - tmp92
tmp94 = tmp93 * tmp93
tmp95 = tmp90 + tmp94
tmp98 = tmp96 - tmp97
tmp99 = tmp98 * tmp98
tmp100 = tmp95 + tmp99
tmp103 = tmp102 - tmp83
tmp104 = tmp103 * tmp103
tmp107 = tmp106 - tmp87
tmp108 = tmp107 * tmp107
tmp109 = tmp104 + tmp108
tmp112 = tmp111 - tmp92
tmp113 = tmp112 * tmp112
tmp114 = tmp109 + tmp113
tmp117 = tmp116 - tmp97
tmp118 = tmp117 * tmp117
tmp119 = tmp114 + tmp118
tmp120 = triton_helpers.minimum(tmp100, tmp119)
tmp123 = tmp122 - tmp83
tmp124 = tmp123 * tmp123
tmp127 = tmp126 - tmp87
tmp128 = tmp127 * tmp127
tmp129 = tmp124 + tmp128
tmp132 = tmp131 - tmp92
tmp133 = tmp132 * tmp132
tmp134 = tmp129 + tmp133
tmp137 = tmp136 - tmp97
tmp138 = tmp137 * tmp137
tmp139 = tmp134 + tmp138
tmp140 = triton_helpers.minimum(tmp120, tmp139)
tmp143 = tmp142 - tmp83
tmp144 = tmp143 * tmp143
tmp147 = tmp146 - tmp87
tmp148 = tmp147 * tmp147
tmp149 = tmp144 + tmp148
tmp152 = tmp151 - tmp92
tmp153 = tmp152 * tmp152
tmp154 = tmp149 + tmp153
tmp157 = tmp156 - tmp97
tmp158 = tmp157 * tmp157
tmp159 = tmp154 + tmp158
tmp160 = triton_helpers.minimum(tmp140, tmp159)
tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK])
tmp163 = tl.sum(tmp161, 1)[:, None]
tmp166 = tmp164 - tmp165
tmp167 = tmp166 * tmp166
tmp170 = tmp168 - tmp169
tmp171 = tmp170 * tmp170
tmp172 = tmp167 + tmp171
tmp175 = tmp173 - tmp174
tmp176 = tmp175 * tmp175
tmp177 = tmp172 + tmp176
tmp180 = tmp178 - tmp179
tmp181 = tmp180 * tmp180
tmp182 = tmp177 + tmp181
tmp185 = tmp184 - tmp165
tmp186 = tmp185 * tmp185
tmp189 = tmp188 - tmp169
tmp190 = tmp189 * tmp189
tmp191 = tmp186 + tmp190
tmp194 = tmp193 - tmp174
tmp195 = tmp194 * tmp194
tmp196 = tmp191 + tmp195
tmp199 = tmp198 - tmp179
tmp200 = tmp199 * tmp199
tmp201 = tmp196 + tmp200
tmp202 = triton_helpers.minimum(tmp182, tmp201)
tmp205 = tmp204 - tmp165
tmp206 = tmp205 * tmp205
tmp209 = tmp208 - tmp169
tmp210 = tmp209 * tmp209
tmp211 = tmp206 + tmp210
tmp214 = tmp213 - tmp174
tmp215 = tmp214 * tmp214
tmp216 = tmp211 + tmp215
tmp219 = tmp218 - tmp179
tmp220 = tmp219 * tmp219
tmp221 = tmp216 + tmp220
tmp222 = triton_helpers.minimum(tmp202, tmp221)
tmp225 = tmp224 - tmp165
tmp226 = tmp225 * tmp225
tmp229 = tmp228 - tmp169
tmp230 = tmp229 * tmp229
tmp231 = tmp226 + tmp230
tmp234 = tmp233 - tmp174
tmp235 = tmp234 * tmp234
tmp236 = tmp231 + tmp235
tmp239 = tmp238 - tmp179
tmp240 = tmp239 * tmp239
tmp241 = tmp236 + tmp240
tmp242 = triton_helpers.minimum(tmp222, tmp241)
tmp243 = tl.broadcast_to(tmp242, [XBLOCK, RBLOCK])
tmp245 = tl.sum(tmp243, 1)[:, None]
tmp248 = tmp246 - tmp247
tmp249 = tmp248 * tmp248
tmp252 = tmp250 - tmp251
tmp253 = tmp252 * tmp252
tmp254 = tmp249 + tmp253
tmp257 = tmp255 - tmp256
tmp258 = tmp257 * tmp257
tmp259 = tmp254 + tmp258
tmp262 = tmp260 - tmp261
tmp263 = tmp262 * tmp262
tmp264 = tmp259 + tmp263
tmp267 = tmp266 - tmp247
tmp268 = tmp267 * tmp267
tmp271 = tmp270 - tmp251
tmp272 = tmp271 * tmp271
tmp273 = tmp268 + tmp272
tmp276 = tmp275 - tmp256
tmp277 = tmp276 * tmp276
tmp278 = tmp273 + tmp277
tmp281 = tmp280 - tmp261
tmp282 = tmp281 * tmp281
tmp283 = tmp278 + tmp282
tmp284 = triton_helpers.minimum(tmp264, tmp283)
tmp287 = tmp286 - tmp247
tmp288 = tmp287 * tmp287
tmp291 = tmp290 - tmp251
tmp292 = tmp291 * tmp291
tmp293 = tmp288 + tmp292
tmp296 = tmp295 - tmp256
tmp297 = tmp296 * tmp296
tmp298 = tmp293 + tmp297
tmp301 = tmp300 - tmp261
tmp302 = tmp301 * tmp301
tmp303 = tmp298 + tmp302
tmp304 = triton_helpers.minimum(tmp284, tmp303)
tmp307 = tmp306 - tmp247
tmp308 = tmp307 * tmp307
tmp311 = tmp310 - tmp251
tmp312 = tmp311 * tmp311
tmp313 = tmp308 + tmp312
tmp316 = tmp315 - tmp256
tmp317 = tmp316 * tmp316
tmp318 = tmp313 + tmp317
tmp321 = tmp320 - tmp261
tmp322 = tmp321 * tmp321
tmp323 = tmp318 + tmp322
tmp324 = triton_helpers.minimum(tmp304, tmp323)
tmp325 = tl.broadcast_to(tmp324, [XBLOCK, RBLOCK])
tmp327 = tl.sum(tmp325, 1)[:, None]
tmp330 = tmp328 - tmp329
tmp331 = tmp330 * tmp330
tmp334 = tmp332 - tmp333
tmp335 = tmp334 * tmp334
tmp336 = tmp331 + tmp335
tmp339 = tmp337 - tmp338
tmp340 = tmp339 * tmp339
tmp341 = tmp336 + tmp340
tmp344 = tmp342 - tmp343
tmp345 = tmp344 * tmp344
tmp346 = tmp341 + tmp345
tmp349 = tmp348 - tmp329
tmp350 = tmp349 * tmp349
tmp353 = tmp352 - tmp333
tmp354 = tmp353 * tmp353
tmp355 = tmp350 + tmp354
tmp358 = tmp357 - tmp338
tmp359 = tmp358 * tmp358
tmp360 = tmp355 + tmp359
tmp363 = tmp362 - tmp343
tmp364 = tmp363 * tmp363
tmp365 = tmp360 + tmp364
tmp366 = triton_helpers.minimum(tmp346, tmp365)
tmp369 = tmp368 - tmp329
tmp370 = tmp369 * tmp369
tmp373 = tmp372 - tmp333
tmp374 = tmp373 * tmp373
tmp375 = tmp370 + tmp374
tmp378 = tmp377 - tmp338
tmp379 = tmp378 * tmp378
tmp380 = tmp375 + tmp379
tmp383 = tmp382 - tmp343
tmp384 = tmp383 * tmp383
tmp385 = tmp380 + tmp384
tmp386 = triton_helpers.minimum(tmp366, tmp385)
tmp389 = tmp388 - tmp329
tmp390 = tmp389 * tmp389
tmp393 = tmp392 - tmp333
tmp394 = tmp393 * tmp393
tmp395 = tmp390 + tmp394
tmp398 = tmp397 - tmp338
tmp399 = tmp398 * tmp398
tmp400 = tmp395 + tmp399
tmp403 = tmp402 - tmp343
tmp404 = tmp403 * tmp403
tmp405 = tmp400 + tmp404
tmp406 = triton_helpers.minimum(tmp386, tmp405)
tmp407 = tl.broadcast_to(tmp406, [XBLOCK, RBLOCK])
tmp409 = tl.sum(tmp407, 1)[:, None]
tmp412 = tmp410 - tmp411
tmp413 = tmp412 * tmp412
tmp416 = tmp414 - tmp415
tmp417 = tmp416 * tmp416
tmp418 = tmp413 + tmp417
tmp421 = tmp419 - tmp420
tmp422 = tmp421 * tmp421
tmp423 = tmp418 + tmp422
tmp426 = tmp424 - tmp425
tmp427 = tmp426 * tmp426
tmp428 = tmp423 + tmp427
tmp431 = tmp430 - tmp411
tmp432 = tmp431 * tmp431
tmp435 = tmp434 - tmp415
tmp436 = tmp435 * tmp435
tmp437 = tmp432 + tmp436
tmp440 = tmp439 - tmp420
tmp441 = tmp440 * tmp440
tmp442 = tmp437 + tmp441
tmp445 = tmp444 - tmp425
tmp446 = tmp445 * tmp445
tmp447 = tmp442 + tmp446
tmp448 = triton_helpers.minimum(tmp428, tmp447)
tmp451 = tmp450 - tmp411
tmp452 = tmp451 * tmp451
tmp455 = tmp454 - tmp415
tmp456 = tmp455 * tmp455
tmp457 = tmp452 + tmp456
tmp460 = tmp459 - tmp420
tmp461 = tmp460 * tmp460
tmp462 = tmp457 + tmp461
tmp465 = tmp464 - tmp425
tmp466 = tmp465 * tmp465
tmp467 = tmp462 + tmp466
tmp468 = triton_helpers.minimum(tmp448, tmp467)
tmp471 = tmp470 - tmp411
tmp472 = tmp471 * tmp471
tmp475 = tmp474 - tmp415
tmp476 = tmp475 * tmp475
tmp477 = tmp472 + tmp476
tmp480 = tmp479 - tmp420
tmp481 = tmp480 * tmp480
tmp482 = tmp477 + tmp481
tmp485 = tmp484 - tmp425
tmp486 = tmp485 * tmp485
tmp487 = tmp482 + tmp486
tmp488 = triton_helpers.minimum(tmp468, tmp487)
tmp489 = tl.broadcast_to(tmp488, [XBLOCK, RBLOCK])
tmp491 = tl.sum(tmp489, 1)[:, None]
tmp494 = tmp492 - tmp493
tmp495 = tmp494 * tmp494
tmp498 = tmp496 - tmp497
tmp499 = tmp498 * tmp498
tmp500 = tmp495 + tmp499
tmp503 = tmp501 - tmp502
tmp504 = tmp503 * tmp503
tmp505 = tmp500 + tmp504
tmp508 = tmp506 - tmp507
tmp509 = tmp508 * tmp508
tmp510 = tmp505 + tmp509
tmp513 = tmp512 - tmp493
tmp514 = tmp513 * tmp513
tmp517 = tmp516 - tmp497
tmp518 = tmp517 * tmp517
tmp519 = tmp514 + tmp518
tmp522 = tmp521 - tmp502
tmp523 = tmp522 * tmp522
tmp524 = tmp519 + tmp523
tmp527 = tmp526 - tmp507
tmp528 = tmp527 * tmp527
tmp529 = tmp524 + tmp528
tmp530 = triton_helpers.minimum(tmp510, tmp529)
tmp533 = tmp532 - tmp493
tmp534 = tmp533 * tmp533
tmp537 = tmp536 - tmp497
tmp538 = tmp537 * tmp537
tmp539 = tmp534 + tmp538
tmp542 = tmp541 - tmp502
tmp543 = tmp542 * tmp542
tmp544 = tmp539 + tmp543
tmp547 = tmp546 - tmp507
tmp548 = tmp547 * tmp547
tmp549 = tmp544 + tmp548
tmp550 = triton_helpers.minimum(tmp530, tmp549)
tmp553 = tmp552 - tmp493
tmp554 = tmp553 * tmp553
tmp557 = tmp556 - tmp497
tmp558 = tmp557 * tmp557
tmp559 = tmp554 + tmp558
tmp562 = tmp561 - tmp502
tmp563 = tmp562 * tmp562
tmp564 = tmp559 + tmp563
tmp567 = tmp566 - tmp507
tmp568 = tmp567 * tmp567
tmp569 = tmp564 + tmp568
tmp570 = triton_helpers.minimum(tmp550, tmp569)
tmp571 = tl.broadcast_to(tmp570, [XBLOCK, RBLOCK])
tmp573 = tl.sum(tmp571, 1)[:, None]
tmp576 = tmp574 - tmp575
tmp577 = tmp576 * tmp576
tmp580 = tmp578 - tmp579
tmp581 = tmp580 * tmp580
tmp582 = tmp577 + tmp581
tmp585 = tmp583 - tmp584
tmp586 = tmp585 * tmp585
tmp587 = tmp582 + tmp586
tmp590 = tmp588 - tmp589
tmp591 = tmp590 * tmp590
tmp592 = tmp587 + tmp591
tmp595 = tmp594 - tmp575
tmp596 = tmp595 * tmp595
tmp599 = tmp598 - tmp579
tmp600 = tmp599 * tmp599
tmp601 = tmp596 + tmp600
tmp604 = tmp603 - tmp584
tmp605 = tmp604 * tmp604
tmp606 = tmp601 + tmp605
tmp609 = tmp608 - tmp589
tmp610 = tmp609 * tmp609
tmp611 = tmp606 + tmp610
tmp612 = triton_helpers.minimum(tmp592, tmp611)
tmp615 = tmp614 - tmp575
tmp616 = tmp615 * tmp615
tmp619 = tmp618 - tmp579
tmp620 = tmp619 * tmp619
tmp621 = tmp616 + tmp620
tmp624 = tmp623 - tmp584
tmp625 = tmp624 * tmp624
tmp626 = tmp621 + tmp625
tmp629 = tmp628 - tmp589
tmp630 = tmp629 * tmp629
tmp631 = tmp626 + tmp630
tmp632 = triton_helpers.minimum(tmp612, tmp631)
tmp635 = tmp634 - tmp575
tmp636 = tmp635 * tmp635
tmp639 = tmp638 - tmp579
tmp640 = tmp639 * tmp639
tmp641 = tmp636 + tmp640
tmp644 = tmp643 - tmp584
tmp645 = tmp644 * tmp644
tmp646 = tmp641 + tmp645
tmp649 = tmp648 - tmp589
tmp650 = tmp649 * tmp649
tmp651 = tmp646 + tmp650
tmp652 = triton_helpers.minimum(tmp632, tmp651)
tmp653 = tl.broadcast_to(tmp652, [XBLOCK, RBLOCK])
tmp655 = tl.sum(tmp653, 1)[:, None]
tmp656 = 4.0
tmp657 = tmp81 / tmp656
tmp658 = 0.5
tmp659 = tmp657 * tmp658
tmp660 = tmp163 / tmp656
tmp661 = tmp660 * tmp658
tmp662 = tmp659 + tmp661
tmp663 = 0.25
tmp664 = tmp662 * tmp663
tmp665 = 0.0
tmp666 = tmp664 + tmp665
tmp667 = tmp245 / tmp656
tmp668 = tmp667 * tmp658
tmp669 = tmp327 / tmp656
tmp670 = tmp669 * tmp658
tmp671 = tmp668 + tmp670
tmp672 = tmp671 * tmp663
tmp673 = tmp666 + tmp672
tmp674 = tmp491 / tmp656
tmp675 = tmp674 * tmp658
tmp676 = tmp655 / tmp656
tmp677 = tmp676 * tmp658
tmp678 = tmp675 + tmp677
tmp679 = tmp678 * tmp663
tmp680 = tmp673 + tmp679
tmp681 = tmp409 / tmp656
tmp682 = tmp681 * tmp658
tmp683 = tmp573 / tmp656
tmp684 = tmp683 * tmp658
tmp685 = tmp682 + tmp684
tmp686 = tmp685 * tmp663
tmp687 = tmp680 + tmp686
tmp688 = 100.0
tmp689 = tmp687 * tmp688
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp689, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((), (), torch.float32)
buf14 = buf10; del buf10 # reuse
buf17 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [min_1, min_2, min_3, min_4, min_5, min_6, min_7, min_8, distances_4, mul_2, distances_9, mul_3, add, truediv, dist, distances_14, mul_6, distances_19, mul_7, add_2, truediv_1, dist_1, distances_24, mul_10, distances_29, mul_11, add_4, truediv_2, dist_2, distances_34, mul_14, distances_39, mul_15, add_6, truediv_3, dist_3, mul_16], Original ATen: [aten.min, aten.mean, aten.mul, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_min_mul_0.run(buf17, arg0_1, arg1_1, 1, 4, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf17, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def array2samples_distance(array1, array2):
"""
arguments:
array1: the array, size: (num_point, num_feature)
array2: the samples, size: (num_point, num_feature)
returns:
distances: each entry is the distance from a sample to array1
"""
num_point1, _num_features1 = array1.shape
num_point2, num_features2 = array2.shape
expanded_array1 = array1.repeat(num_point2, 1)
expanded_array2 = torch.reshape(torch.unsqueeze(array2, 1).repeat(1,
num_point1, 1), (-1, num_features2))
distances = (expanded_array1 - expanded_array2) * (expanded_array1 -
expanded_array2)
distances = torch.sum(distances, dim=1)
distances = torch.reshape(distances, (num_point2, num_point1))
distances = torch.min(distances, dim=1)[0]
distances = torch.mean(distances)
return distances
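# Hedged vectorized equivalent (an addition, not the repo's implementation): the
# repeat/reshape above materializes every point pair; torch.cdist computes the
# same pairwise Euclidean distances directly, and squaring them matches the
# sum-of-squares used above.
def array2samples_distance_cdist(array1, array2):
    squared = torch.cdist(array2, array1) ** 2  # (num_point2, num_point1)
    return squared.min(dim=1)[0].mean()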
def chamfer_distance_numpy(array1, array2):
    """Symmetric Chamfer distance averaged over the batch and scaled by 100
    (implemented with torch ops despite the name)."""
batch_size, _num_point, _num_features = array1.shape
dist = 0
for i in range(batch_size):
av_dist1 = array2samples_distance(array1[i], array2[i])
av_dist2 = array2samples_distance(array2[i], array1[i])
dist = dist + (0.5 * av_dist1 + 0.5 * av_dist2) / batch_size
return dist * 100
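# Sanity check (an addition): identical clouds give exactly zero loss, and a
# uniform offset of 0.1 per coordinate bounds each nearest-neighbor squared
# distance by 3 * 0.1 ** 2 = 0.03 before the * 100 scaling.
def _demo_chamfer():
    a = torch.rand(2, 16, 3)
    assert chamfer_distance_numpy(a, a).item() == 0.0
    assert chamfer_distance_numpy(a, a + 0.1).item() <= 3.0 + 0.0001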
class PointLoss(nn.Module):
def __init__(self):
super(PointLoss, self).__init__()
def forward(self, array1, array2):
return chamfer_distance_numpy(array1, array2)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mean_min_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * (4 * r0 % 4), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * (4 * r0 % 4)), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * (4 * r0 % 4)), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + 4 * (4 * r0 % 4)), None, eviction_policy
='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + 4)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp23 = tl.load(in_ptr0 + 5)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp28 = tl.load(in_ptr0 + 6)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp33 = tl.load(in_ptr0 + 7)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp39 = tl.load(in_ptr0 + 8)
tmp40 = tl.broadcast_to(tmp39, [XBLOCK, RBLOCK])
tmp43 = tl.load(in_ptr0 + 9)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
tmp48 = tl.load(in_ptr0 + 10)
tmp49 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK])
tmp53 = tl.load(in_ptr0 + 11)
tmp54 = tl.broadcast_to(tmp53, [XBLOCK, RBLOCK])
tmp59 = tl.load(in_ptr0 + 12)
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tmp63 = tl.load(in_ptr0 + 13)
tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
tmp68 = tl.load(in_ptr0 + 14)
tmp69 = tl.broadcast_to(tmp68, [XBLOCK, RBLOCK])
tmp73 = tl.load(in_ptr0 + 15)
tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK])
tmp82 = tl.load(in_ptr1 + 4 * (4 * r0 % 4), None, eviction_policy=
'evict_last')
tmp83 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp86 = tl.load(in_ptr1 + (1 + 4 * (4 * r0 % 4)), None, eviction_policy
='evict_last')
tmp87 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp91 = tl.load(in_ptr1 + (2 + 4 * (4 * r0 % 4)), None, eviction_policy
='evict_last')
tmp92 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp96 = tl.load(in_ptr1 + (3 + 4 * (4 * r0 % 4)), None, eviction_policy
='evict_last')
tmp97 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp101 = tl.load(in_ptr1 + 4)
tmp102 = tl.broadcast_to(tmp101, [XBLOCK, RBLOCK])
tmp105 = tl.load(in_ptr1 + 5)
tmp106 = tl.broadcast_to(tmp105, [XBLOCK, RBLOCK])
tmp110 = tl.load(in_ptr1 + 6)
tmp111 = tl.broadcast_to(tmp110, [XBLOCK, RBLOCK])
tmp115 = tl.load(in_ptr1 + 7)
tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK])
tmp121 = tl.load(in_ptr1 + 8)
tmp122 = tl.broadcast_to(tmp121, [XBLOCK, RBLOCK])
tmp125 = tl.load(in_ptr1 + 9)
tmp126 = tl.broadcast_to(tmp125, [XBLOCK, RBLOCK])
tmp130 = tl.load(in_ptr1 + 10)
tmp131 = tl.broadcast_to(tmp130, [XBLOCK, RBLOCK])
tmp135 = tl.load(in_ptr1 + 11)
tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
tmp141 = tl.load(in_ptr1 + 12)
tmp142 = tl.broadcast_to(tmp141, [XBLOCK, RBLOCK])
tmp145 = tl.load(in_ptr1 + 13)
tmp146 = tl.broadcast_to(tmp145, [XBLOCK, RBLOCK])
tmp150 = tl.load(in_ptr1 + 14)
tmp151 = tl.broadcast_to(tmp150, [XBLOCK, RBLOCK])
tmp155 = tl.load(in_ptr1 + 15)
tmp156 = tl.broadcast_to(tmp155, [XBLOCK, RBLOCK])
tmp164 = tl.load(in_ptr0 + (16 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp165 = tl.load(in_ptr1 + (16 + 4 * r0), None, eviction_policy=
'evict_last')
tmp168 = tl.load(in_ptr0 + (17 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp169 = tl.load(in_ptr1 + (17 + 4 * r0), None, eviction_policy=
'evict_last')
tmp173 = tl.load(in_ptr0 + (18 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp174 = tl.load(in_ptr1 + (18 + 4 * r0), None, eviction_policy=
'evict_last')
tmp178 = tl.load(in_ptr0 + (19 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp179 = tl.load(in_ptr1 + (19 + 4 * r0), None, eviction_policy=
'evict_last')
tmp183 = tl.load(in_ptr0 + 20)
tmp184 = tl.broadcast_to(tmp183, [XBLOCK, RBLOCK])
tmp187 = tl.load(in_ptr0 + 21)
tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK])
tmp192 = tl.load(in_ptr0 + 22)
tmp193 = tl.broadcast_to(tmp192, [XBLOCK, RBLOCK])
tmp197 = tl.load(in_ptr0 + 23)
tmp198 = tl.broadcast_to(tmp197, [XBLOCK, RBLOCK])
tmp203 = tl.load(in_ptr0 + 24)
tmp204 = tl.broadcast_to(tmp203, [XBLOCK, RBLOCK])
tmp207 = tl.load(in_ptr0 + 25)
tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
tmp212 = tl.load(in_ptr0 + 26)
tmp213 = tl.broadcast_to(tmp212, [XBLOCK, RBLOCK])
tmp217 = tl.load(in_ptr0 + 27)
tmp218 = tl.broadcast_to(tmp217, [XBLOCK, RBLOCK])
tmp223 = tl.load(in_ptr0 + 28)
tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK])
tmp227 = tl.load(in_ptr0 + 29)
tmp228 = tl.broadcast_to(tmp227, [XBLOCK, RBLOCK])
tmp232 = tl.load(in_ptr0 + 30)
tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK])
tmp237 = tl.load(in_ptr0 + 31)
tmp238 = tl.broadcast_to(tmp237, [XBLOCK, RBLOCK])
tmp246 = tl.load(in_ptr1 + (16 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp247 = tl.load(in_ptr0 + (16 + 4 * r0), None, eviction_policy=
'evict_last')
tmp250 = tl.load(in_ptr1 + (17 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp251 = tl.load(in_ptr0 + (17 + 4 * r0), None, eviction_policy=
'evict_last')
tmp255 = tl.load(in_ptr1 + (18 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp256 = tl.load(in_ptr0 + (18 + 4 * r0), None, eviction_policy=
'evict_last')
tmp260 = tl.load(in_ptr1 + (19 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp261 = tl.load(in_ptr0 + (19 + 4 * r0), None, eviction_policy=
'evict_last')
tmp265 = tl.load(in_ptr1 + 20)
tmp266 = tl.broadcast_to(tmp265, [XBLOCK, RBLOCK])
tmp269 = tl.load(in_ptr1 + 21)
tmp270 = tl.broadcast_to(tmp269, [XBLOCK, RBLOCK])
tmp274 = tl.load(in_ptr1 + 22)
tmp275 = tl.broadcast_to(tmp274, [XBLOCK, RBLOCK])
tmp279 = tl.load(in_ptr1 + 23)
tmp280 = tl.broadcast_to(tmp279, [XBLOCK, RBLOCK])
tmp285 = tl.load(in_ptr1 + 24)
tmp286 = tl.broadcast_to(tmp285, [XBLOCK, RBLOCK])
tmp289 = tl.load(in_ptr1 + 25)
tmp290 = tl.broadcast_to(tmp289, [XBLOCK, RBLOCK])
tmp294 = tl.load(in_ptr1 + 26)
tmp295 = tl.broadcast_to(tmp294, [XBLOCK, RBLOCK])
tmp299 = tl.load(in_ptr1 + 27)
tmp300 = tl.broadcast_to(tmp299, [XBLOCK, RBLOCK])
tmp305 = tl.load(in_ptr1 + 28)
tmp306 = tl.broadcast_to(tmp305, [XBLOCK, RBLOCK])
tmp309 = tl.load(in_ptr1 + 29)
tmp310 = tl.broadcast_to(tmp309, [XBLOCK, RBLOCK])
tmp314 = tl.load(in_ptr1 + 30)
tmp315 = tl.broadcast_to(tmp314, [XBLOCK, RBLOCK])
tmp319 = tl.load(in_ptr1 + 31)
tmp320 = tl.broadcast_to(tmp319, [XBLOCK, RBLOCK])
tmp328 = tl.load(in_ptr0 + (48 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp329 = tl.load(in_ptr1 + (48 + 4 * r0), None, eviction_policy=
'evict_last')
tmp332 = tl.load(in_ptr0 + (49 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp333 = tl.load(in_ptr1 + (49 + 4 * r0), None, eviction_policy=
'evict_last')
tmp337 = tl.load(in_ptr0 + (50 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp338 = tl.load(in_ptr1 + (50 + 4 * r0), None, eviction_policy=
'evict_last')
tmp342 = tl.load(in_ptr0 + (51 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp343 = tl.load(in_ptr1 + (51 + 4 * r0), None, eviction_policy=
'evict_last')
tmp347 = tl.load(in_ptr0 + 52)
tmp348 = tl.broadcast_to(tmp347, [XBLOCK, RBLOCK])
tmp351 = tl.load(in_ptr0 + 53)
tmp352 = tl.broadcast_to(tmp351, [XBLOCK, RBLOCK])
tmp356 = tl.load(in_ptr0 + 54)
tmp357 = tl.broadcast_to(tmp356, [XBLOCK, RBLOCK])
tmp361 = tl.load(in_ptr0 + 55)
tmp362 = tl.broadcast_to(tmp361, [XBLOCK, RBLOCK])
tmp367 = tl.load(in_ptr0 + 56)
tmp368 = tl.broadcast_to(tmp367, [XBLOCK, RBLOCK])
tmp371 = tl.load(in_ptr0 + 57)
tmp372 = tl.broadcast_to(tmp371, [XBLOCK, RBLOCK])
tmp376 = tl.load(in_ptr0 + 58)
tmp377 = tl.broadcast_to(tmp376, [XBLOCK, RBLOCK])
tmp381 = tl.load(in_ptr0 + 59)
tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK])
tmp387 = tl.load(in_ptr0 + 60)
tmp388 = tl.broadcast_to(tmp387, [XBLOCK, RBLOCK])
tmp391 = tl.load(in_ptr0 + 61)
tmp392 = tl.broadcast_to(tmp391, [XBLOCK, RBLOCK])
tmp396 = tl.load(in_ptr0 + 62)
tmp397 = tl.broadcast_to(tmp396, [XBLOCK, RBLOCK])
tmp401 = tl.load(in_ptr0 + 63)
tmp402 = tl.broadcast_to(tmp401, [XBLOCK, RBLOCK])
tmp410 = tl.load(in_ptr0 + (32 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp411 = tl.load(in_ptr1 + (32 + 4 * r0), None, eviction_policy=
'evict_last')
tmp414 = tl.load(in_ptr0 + (33 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp415 = tl.load(in_ptr1 + (33 + 4 * r0), None, eviction_policy=
'evict_last')
tmp419 = tl.load(in_ptr0 + (34 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp420 = tl.load(in_ptr1 + (34 + 4 * r0), None, eviction_policy=
'evict_last')
tmp424 = tl.load(in_ptr0 + (35 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp425 = tl.load(in_ptr1 + (35 + 4 * r0), None, eviction_policy=
'evict_last')
tmp429 = tl.load(in_ptr0 + 36)
tmp430 = tl.broadcast_to(tmp429, [XBLOCK, RBLOCK])
tmp433 = tl.load(in_ptr0 + 37)
tmp434 = tl.broadcast_to(tmp433, [XBLOCK, RBLOCK])
tmp438 = tl.load(in_ptr0 + 38)
tmp439 = tl.broadcast_to(tmp438, [XBLOCK, RBLOCK])
tmp443 = tl.load(in_ptr0 + 39)
tmp444 = tl.broadcast_to(tmp443, [XBLOCK, RBLOCK])
tmp449 = tl.load(in_ptr0 + 40)
tmp450 = tl.broadcast_to(tmp449, [XBLOCK, RBLOCK])
tmp453 = tl.load(in_ptr0 + 41)
tmp454 = tl.broadcast_to(tmp453, [XBLOCK, RBLOCK])
tmp458 = tl.load(in_ptr0 + 42)
tmp459 = tl.broadcast_to(tmp458, [XBLOCK, RBLOCK])
tmp463 = tl.load(in_ptr0 + 43)
tmp464 = tl.broadcast_to(tmp463, [XBLOCK, RBLOCK])
tmp469 = tl.load(in_ptr0 + 44)
tmp470 = tl.broadcast_to(tmp469, [XBLOCK, RBLOCK])
tmp473 = tl.load(in_ptr0 + 45)
tmp474 = tl.broadcast_to(tmp473, [XBLOCK, RBLOCK])
tmp478 = tl.load(in_ptr0 + 46)
tmp479 = tl.broadcast_to(tmp478, [XBLOCK, RBLOCK])
tmp483 = tl.load(in_ptr0 + 47)
tmp484 = tl.broadcast_to(tmp483, [XBLOCK, RBLOCK])
tmp492 = tl.load(in_ptr1 + (48 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp493 = tl.load(in_ptr0 + (48 + 4 * r0), None, eviction_policy=
'evict_last')
tmp496 = tl.load(in_ptr1 + (49 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp497 = tl.load(in_ptr0 + (49 + 4 * r0), None, eviction_policy=
'evict_last')
tmp501 = tl.load(in_ptr1 + (50 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp502 = tl.load(in_ptr0 + (50 + 4 * r0), None, eviction_policy=
'evict_last')
tmp506 = tl.load(in_ptr1 + (51 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp507 = tl.load(in_ptr0 + (51 + 4 * r0), None, eviction_policy=
'evict_last')
tmp511 = tl.load(in_ptr1 + 52)
tmp512 = tl.broadcast_to(tmp511, [XBLOCK, RBLOCK])
tmp515 = tl.load(in_ptr1 + 53)
tmp516 = tl.broadcast_to(tmp515, [XBLOCK, RBLOCK])
tmp520 = tl.load(in_ptr1 + 54)
tmp521 = tl.broadcast_to(tmp520, [XBLOCK, RBLOCK])
tmp525 = tl.load(in_ptr1 + 55)
tmp526 = tl.broadcast_to(tmp525, [XBLOCK, RBLOCK])
tmp531 = tl.load(in_ptr1 + 56)
tmp532 = tl.broadcast_to(tmp531, [XBLOCK, RBLOCK])
tmp535 = tl.load(in_ptr1 + 57)
tmp536 = tl.broadcast_to(tmp535, [XBLOCK, RBLOCK])
tmp540 = tl.load(in_ptr1 + 58)
tmp541 = tl.broadcast_to(tmp540, [XBLOCK, RBLOCK])
tmp545 = tl.load(in_ptr1 + 59)
tmp546 = tl.broadcast_to(tmp545, [XBLOCK, RBLOCK])
tmp551 = tl.load(in_ptr1 + 60)
tmp552 = tl.broadcast_to(tmp551, [XBLOCK, RBLOCK])
tmp555 = tl.load(in_ptr1 + 61)
tmp556 = tl.broadcast_to(tmp555, [XBLOCK, RBLOCK])
tmp560 = tl.load(in_ptr1 + 62)
tmp561 = tl.broadcast_to(tmp560, [XBLOCK, RBLOCK])
tmp565 = tl.load(in_ptr1 + 63)
tmp566 = tl.broadcast_to(tmp565, [XBLOCK, RBLOCK])
tmp574 = tl.load(in_ptr1 + (32 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp575 = tl.load(in_ptr0 + (32 + 4 * r0), None, eviction_policy=
'evict_last')
tmp578 = tl.load(in_ptr1 + (33 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp579 = tl.load(in_ptr0 + (33 + 4 * r0), None, eviction_policy=
'evict_last')
tmp583 = tl.load(in_ptr1 + (34 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp584 = tl.load(in_ptr0 + (34 + 4 * r0), None, eviction_policy=
'evict_last')
tmp588 = tl.load(in_ptr1 + (35 + 4 * (4 * r0 % 4)), None,
eviction_policy='evict_last')
tmp589 = tl.load(in_ptr0 + (35 + 4 * r0), None, eviction_policy=
'evict_last')
tmp593 = tl.load(in_ptr1 + 36)
tmp594 = tl.broadcast_to(tmp593, [XBLOCK, RBLOCK])
tmp597 = tl.load(in_ptr1 + 37)
tmp598 = tl.broadcast_to(tmp597, [XBLOCK, RBLOCK])
tmp602 = tl.load(in_ptr1 + 38)
tmp603 = tl.broadcast_to(tmp602, [XBLOCK, RBLOCK])
tmp607 = tl.load(in_ptr1 + 39)
tmp608 = tl.broadcast_to(tmp607, [XBLOCK, RBLOCK])
tmp613 = tl.load(in_ptr1 + 40)
tmp614 = tl.broadcast_to(tmp613, [XBLOCK, RBLOCK])
tmp617 = tl.load(in_ptr1 + 41)
tmp618 = tl.broadcast_to(tmp617, [XBLOCK, RBLOCK])
tmp622 = tl.load(in_ptr1 + 42)
tmp623 = tl.broadcast_to(tmp622, [XBLOCK, RBLOCK])
tmp627 = tl.load(in_ptr1 + 43)
tmp628 = tl.broadcast_to(tmp627, [XBLOCK, RBLOCK])
tmp633 = tl.load(in_ptr1 + 44)
tmp634 = tl.broadcast_to(tmp633, [XBLOCK, RBLOCK])
tmp637 = tl.load(in_ptr1 + 45)
tmp638 = tl.broadcast_to(tmp637, [XBLOCK, RBLOCK])
tmp642 = tl.load(in_ptr1 + 46)
tmp643 = tl.broadcast_to(tmp642, [XBLOCK, RBLOCK])
tmp647 = tl.load(in_ptr1 + 47)
tmp648 = tl.broadcast_to(tmp647, [XBLOCK, RBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp21 = tmp20 - tmp1
tmp22 = tmp21 * tmp21
tmp25 = tmp24 - tmp5
tmp26 = tmp25 * tmp25
tmp27 = tmp22 + tmp26
tmp30 = tmp29 - tmp10
tmp31 = tmp30 * tmp30
tmp32 = tmp27 + tmp31
tmp35 = tmp34 - tmp15
tmp36 = tmp35 * tmp35
tmp37 = tmp32 + tmp36
tmp38 = triton_helpers.minimum(tmp18, tmp37)
tmp41 = tmp40 - tmp1
tmp42 = tmp41 * tmp41
tmp45 = tmp44 - tmp5
tmp46 = tmp45 * tmp45
tmp47 = tmp42 + tmp46
tmp50 = tmp49 - tmp10
tmp51 = tmp50 * tmp50
tmp52 = tmp47 + tmp51
tmp55 = tmp54 - tmp15
tmp56 = tmp55 * tmp55
tmp57 = tmp52 + tmp56
tmp58 = triton_helpers.minimum(tmp38, tmp57)
tmp61 = tmp60 - tmp1
tmp62 = tmp61 * tmp61
tmp65 = tmp64 - tmp5
tmp66 = tmp65 * tmp65
tmp67 = tmp62 + tmp66
tmp70 = tmp69 - tmp10
tmp71 = tmp70 * tmp70
tmp72 = tmp67 + tmp71
tmp75 = tmp74 - tmp15
tmp76 = tmp75 * tmp75
tmp77 = tmp72 + tmp76
tmp78 = triton_helpers.minimum(tmp58, tmp77)
tmp79 = tl.broadcast_to(tmp78, [XBLOCK, RBLOCK])
tmp81 = tl.sum(tmp79, 1)[:, None]
tmp84 = tmp82 - tmp83
tmp85 = tmp84 * tmp84
tmp88 = tmp86 - tmp87
tmp89 = tmp88 * tmp88
tmp90 = tmp85 + tmp89
tmp93 = tmp91 - tmp92
tmp94 = tmp93 * tmp93
tmp95 = tmp90 + tmp94
tmp98 = tmp96 - tmp97
tmp99 = tmp98 * tmp98
tmp100 = tmp95 + tmp99
tmp103 = tmp102 - tmp83
tmp104 = tmp103 * tmp103
tmp107 = tmp106 - tmp87
tmp108 = tmp107 * tmp107
tmp109 = tmp104 + tmp108
tmp112 = tmp111 - tmp92
tmp113 = tmp112 * tmp112
tmp114 = tmp109 + tmp113
tmp117 = tmp116 - tmp97
tmp118 = tmp117 * tmp117
tmp119 = tmp114 + tmp118
tmp120 = triton_helpers.minimum(tmp100, tmp119)
tmp123 = tmp122 - tmp83
tmp124 = tmp123 * tmp123
tmp127 = tmp126 - tmp87
tmp128 = tmp127 * tmp127
tmp129 = tmp124 + tmp128
tmp132 = tmp131 - tmp92
tmp133 = tmp132 * tmp132
tmp134 = tmp129 + tmp133
tmp137 = tmp136 - tmp97
tmp138 = tmp137 * tmp137
tmp139 = tmp134 + tmp138
tmp140 = triton_helpers.minimum(tmp120, tmp139)
tmp143 = tmp142 - tmp83
tmp144 = tmp143 * tmp143
tmp147 = tmp146 - tmp87
tmp148 = tmp147 * tmp147
tmp149 = tmp144 + tmp148
tmp152 = tmp151 - tmp92
tmp153 = tmp152 * tmp152
tmp154 = tmp149 + tmp153
tmp157 = tmp156 - tmp97
tmp158 = tmp157 * tmp157
tmp159 = tmp154 + tmp158
tmp160 = triton_helpers.minimum(tmp140, tmp159)
tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK])
tmp163 = tl.sum(tmp161, 1)[:, None]
tmp166 = tmp164 - tmp165
tmp167 = tmp166 * tmp166
tmp170 = tmp168 - tmp169
tmp171 = tmp170 * tmp170
tmp172 = tmp167 + tmp171
tmp175 = tmp173 - tmp174
tmp176 = tmp175 * tmp175
tmp177 = tmp172 + tmp176
tmp180 = tmp178 - tmp179
tmp181 = tmp180 * tmp180
tmp182 = tmp177 + tmp181
tmp185 = tmp184 - tmp165
tmp186 = tmp185 * tmp185
tmp189 = tmp188 - tmp169
tmp190 = tmp189 * tmp189
tmp191 = tmp186 + tmp190
tmp194 = tmp193 - tmp174
tmp195 = tmp194 * tmp194
tmp196 = tmp191 + tmp195
tmp199 = tmp198 - tmp179
tmp200 = tmp199 * tmp199
tmp201 = tmp196 + tmp200
tmp202 = triton_helpers.minimum(tmp182, tmp201)
tmp205 = tmp204 - tmp165
tmp206 = tmp205 * tmp205
tmp209 = tmp208 - tmp169
tmp210 = tmp209 * tmp209
tmp211 = tmp206 + tmp210
tmp214 = tmp213 - tmp174
tmp215 = tmp214 * tmp214
tmp216 = tmp211 + tmp215
tmp219 = tmp218 - tmp179
tmp220 = tmp219 * tmp219
tmp221 = tmp216 + tmp220
tmp222 = triton_helpers.minimum(tmp202, tmp221)
tmp225 = tmp224 - tmp165
tmp226 = tmp225 * tmp225
tmp229 = tmp228 - tmp169
tmp230 = tmp229 * tmp229
tmp231 = tmp226 + tmp230
tmp234 = tmp233 - tmp174
tmp235 = tmp234 * tmp234
tmp236 = tmp231 + tmp235
tmp239 = tmp238 - tmp179
tmp240 = tmp239 * tmp239
tmp241 = tmp236 + tmp240
tmp242 = triton_helpers.minimum(tmp222, tmp241)
tmp243 = tl.broadcast_to(tmp242, [XBLOCK, RBLOCK])
tmp245 = tl.sum(tmp243, 1)[:, None]
tmp248 = tmp246 - tmp247
tmp249 = tmp248 * tmp248
tmp252 = tmp250 - tmp251
tmp253 = tmp252 * tmp252
tmp254 = tmp249 + tmp253
tmp257 = tmp255 - tmp256
tmp258 = tmp257 * tmp257
tmp259 = tmp254 + tmp258
tmp262 = tmp260 - tmp261
tmp263 = tmp262 * tmp262
tmp264 = tmp259 + tmp263
tmp267 = tmp266 - tmp247
tmp268 = tmp267 * tmp267
tmp271 = tmp270 - tmp251
tmp272 = tmp271 * tmp271
tmp273 = tmp268 + tmp272
tmp276 = tmp275 - tmp256
tmp277 = tmp276 * tmp276
tmp278 = tmp273 + tmp277
tmp281 = tmp280 - tmp261
tmp282 = tmp281 * tmp281
tmp283 = tmp278 + tmp282
tmp284 = triton_helpers.minimum(tmp264, tmp283)
tmp287 = tmp286 - tmp247
tmp288 = tmp287 * tmp287
tmp291 = tmp290 - tmp251
tmp292 = tmp291 * tmp291
tmp293 = tmp288 + tmp292
tmp296 = tmp295 - tmp256
tmp297 = tmp296 * tmp296
tmp298 = tmp293 + tmp297
tmp301 = tmp300 - tmp261
tmp302 = tmp301 * tmp301
tmp303 = tmp298 + tmp302
tmp304 = triton_helpers.minimum(tmp284, tmp303)
tmp307 = tmp306 - tmp247
tmp308 = tmp307 * tmp307
tmp311 = tmp310 - tmp251
tmp312 = tmp311 * tmp311
tmp313 = tmp308 + tmp312
tmp316 = tmp315 - tmp256
tmp317 = tmp316 * tmp316
tmp318 = tmp313 + tmp317
tmp321 = tmp320 - tmp261
tmp322 = tmp321 * tmp321
tmp323 = tmp318 + tmp322
tmp324 = triton_helpers.minimum(tmp304, tmp323)
tmp325 = tl.broadcast_to(tmp324, [XBLOCK, RBLOCK])
tmp327 = tl.sum(tmp325, 1)[:, None]
tmp330 = tmp328 - tmp329
tmp331 = tmp330 * tmp330
tmp334 = tmp332 - tmp333
tmp335 = tmp334 * tmp334
tmp336 = tmp331 + tmp335
tmp339 = tmp337 - tmp338
tmp340 = tmp339 * tmp339
tmp341 = tmp336 + tmp340
tmp344 = tmp342 - tmp343
tmp345 = tmp344 * tmp344
tmp346 = tmp341 + tmp345
tmp349 = tmp348 - tmp329
tmp350 = tmp349 * tmp349
tmp353 = tmp352 - tmp333
tmp354 = tmp353 * tmp353
tmp355 = tmp350 + tmp354
tmp358 = tmp357 - tmp338
tmp359 = tmp358 * tmp358
tmp360 = tmp355 + tmp359
tmp363 = tmp362 - tmp343
tmp364 = tmp363 * tmp363
tmp365 = tmp360 + tmp364
tmp366 = triton_helpers.minimum(tmp346, tmp365)
tmp369 = tmp368 - tmp329
tmp370 = tmp369 * tmp369
tmp373 = tmp372 - tmp333
tmp374 = tmp373 * tmp373
tmp375 = tmp370 + tmp374
tmp378 = tmp377 - tmp338
tmp379 = tmp378 * tmp378
tmp380 = tmp375 + tmp379
tmp383 = tmp382 - tmp343
tmp384 = tmp383 * tmp383
tmp385 = tmp380 + tmp384
tmp386 = triton_helpers.minimum(tmp366, tmp385)
tmp389 = tmp388 - tmp329
tmp390 = tmp389 * tmp389
tmp393 = tmp392 - tmp333
tmp394 = tmp393 * tmp393
tmp395 = tmp390 + tmp394
tmp398 = tmp397 - tmp338
tmp399 = tmp398 * tmp398
tmp400 = tmp395 + tmp399
tmp403 = tmp402 - tmp343
tmp404 = tmp403 * tmp403
tmp405 = tmp400 + tmp404
tmp406 = triton_helpers.minimum(tmp386, tmp405)
tmp407 = tl.broadcast_to(tmp406, [XBLOCK, RBLOCK])
tmp409 = tl.sum(tmp407, 1)[:, None]
tmp412 = tmp410 - tmp411
tmp413 = tmp412 * tmp412
tmp416 = tmp414 - tmp415
tmp417 = tmp416 * tmp416
tmp418 = tmp413 + tmp417
tmp421 = tmp419 - tmp420
tmp422 = tmp421 * tmp421
tmp423 = tmp418 + tmp422
tmp426 = tmp424 - tmp425
tmp427 = tmp426 * tmp426
tmp428 = tmp423 + tmp427
tmp431 = tmp430 - tmp411
tmp432 = tmp431 * tmp431
tmp435 = tmp434 - tmp415
tmp436 = tmp435 * tmp435
tmp437 = tmp432 + tmp436
tmp440 = tmp439 - tmp420
tmp441 = tmp440 * tmp440
tmp442 = tmp437 + tmp441
tmp445 = tmp444 - tmp425
tmp446 = tmp445 * tmp445
tmp447 = tmp442 + tmp446
tmp448 = triton_helpers.minimum(tmp428, tmp447)
tmp451 = tmp450 - tmp411
tmp452 = tmp451 * tmp451
tmp455 = tmp454 - tmp415
tmp456 = tmp455 * tmp455
tmp457 = tmp452 + tmp456
tmp460 = tmp459 - tmp420
tmp461 = tmp460 * tmp460
tmp462 = tmp457 + tmp461
tmp465 = tmp464 - tmp425
tmp466 = tmp465 * tmp465
tmp467 = tmp462 + tmp466
tmp468 = triton_helpers.minimum(tmp448, tmp467)
tmp471 = tmp470 - tmp411
tmp472 = tmp471 * tmp471
tmp475 = tmp474 - tmp415
tmp476 = tmp475 * tmp475
tmp477 = tmp472 + tmp476
tmp480 = tmp479 - tmp420
tmp481 = tmp480 * tmp480
tmp482 = tmp477 + tmp481
tmp485 = tmp484 - tmp425
tmp486 = tmp485 * tmp485
tmp487 = tmp482 + tmp486
tmp488 = triton_helpers.minimum(tmp468, tmp487)
tmp489 = tl.broadcast_to(tmp488, [XBLOCK, RBLOCK])
tmp491 = tl.sum(tmp489, 1)[:, None]
tmp494 = tmp492 - tmp493
tmp495 = tmp494 * tmp494
tmp498 = tmp496 - tmp497
tmp499 = tmp498 * tmp498
tmp500 = tmp495 + tmp499
tmp503 = tmp501 - tmp502
tmp504 = tmp503 * tmp503
tmp505 = tmp500 + tmp504
tmp508 = tmp506 - tmp507
tmp509 = tmp508 * tmp508
tmp510 = tmp505 + tmp509
tmp513 = tmp512 - tmp493
tmp514 = tmp513 * tmp513
tmp517 = tmp516 - tmp497
tmp518 = tmp517 * tmp517
tmp519 = tmp514 + tmp518
tmp522 = tmp521 - tmp502
tmp523 = tmp522 * tmp522
tmp524 = tmp519 + tmp523
tmp527 = tmp526 - tmp507
tmp528 = tmp527 * tmp527
tmp529 = tmp524 + tmp528
tmp530 = triton_helpers.minimum(tmp510, tmp529)
tmp533 = tmp532 - tmp493
tmp534 = tmp533 * tmp533
tmp537 = tmp536 - tmp497
tmp538 = tmp537 * tmp537
tmp539 = tmp534 + tmp538
tmp542 = tmp541 - tmp502
tmp543 = tmp542 * tmp542
tmp544 = tmp539 + tmp543
tmp547 = tmp546 - tmp507
tmp548 = tmp547 * tmp547
tmp549 = tmp544 + tmp548
tmp550 = triton_helpers.minimum(tmp530, tmp549)
tmp553 = tmp552 - tmp493
tmp554 = tmp553 * tmp553
tmp557 = tmp556 - tmp497
tmp558 = tmp557 * tmp557
tmp559 = tmp554 + tmp558
tmp562 = tmp561 - tmp502
tmp563 = tmp562 * tmp562
tmp564 = tmp559 + tmp563
tmp567 = tmp566 - tmp507
tmp568 = tmp567 * tmp567
tmp569 = tmp564 + tmp568
tmp570 = triton_helpers.minimum(tmp550, tmp569)
tmp571 = tl.broadcast_to(tmp570, [XBLOCK, RBLOCK])
tmp573 = tl.sum(tmp571, 1)[:, None]
tmp576 = tmp574 - tmp575
tmp577 = tmp576 * tmp576
tmp580 = tmp578 - tmp579
tmp581 = tmp580 * tmp580
tmp582 = tmp577 + tmp581
tmp585 = tmp583 - tmp584
tmp586 = tmp585 * tmp585
tmp587 = tmp582 + tmp586
tmp590 = tmp588 - tmp589
tmp591 = tmp590 * tmp590
tmp592 = tmp587 + tmp591
tmp595 = tmp594 - tmp575
tmp596 = tmp595 * tmp595
tmp599 = tmp598 - tmp579
tmp600 = tmp599 * tmp599
tmp601 = tmp596 + tmp600
tmp604 = tmp603 - tmp584
tmp605 = tmp604 * tmp604
tmp606 = tmp601 + tmp605
tmp609 = tmp608 - tmp589
tmp610 = tmp609 * tmp609
tmp611 = tmp606 + tmp610
tmp612 = triton_helpers.minimum(tmp592, tmp611)
tmp615 = tmp614 - tmp575
tmp616 = tmp615 * tmp615
tmp619 = tmp618 - tmp579
tmp620 = tmp619 * tmp619
tmp621 = tmp616 + tmp620
tmp624 = tmp623 - tmp584
tmp625 = tmp624 * tmp624
tmp626 = tmp621 + tmp625
tmp629 = tmp628 - tmp589
tmp630 = tmp629 * tmp629
tmp631 = tmp626 + tmp630
tmp632 = triton_helpers.minimum(tmp612, tmp631)
tmp635 = tmp634 - tmp575
tmp636 = tmp635 * tmp635
tmp639 = tmp638 - tmp579
tmp640 = tmp639 * tmp639
tmp641 = tmp636 + tmp640
tmp644 = tmp643 - tmp584
tmp645 = tmp644 * tmp644
tmp646 = tmp641 + tmp645
tmp649 = tmp648 - tmp589
tmp650 = tmp649 * tmp649
tmp651 = tmp646 + tmp650
tmp652 = triton_helpers.minimum(tmp632, tmp651)
tmp653 = tl.broadcast_to(tmp652, [XBLOCK, RBLOCK])
tmp655 = tl.sum(tmp653, 1)[:, None]
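# Reduction epilogue: each tl.sum above is one direction of the chamfer term
# for one batch element. Dividing by 4.0 takes the mean over the 4 points per
# cloud, the 0.5 weights the two directions, the 0.25 averages over the batch
# of 4, and the final 100.0 scale matches chamfer_distance_numpy below.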
tmp656 = 4.0
tmp657 = tmp81 / tmp656
tmp658 = 0.5
tmp659 = tmp657 * tmp658
tmp660 = tmp163 / tmp656
tmp661 = tmp660 * tmp658
tmp662 = tmp659 + tmp661
tmp663 = 0.25
tmp664 = tmp662 * tmp663
tmp665 = 0.0
tmp666 = tmp664 + tmp665
tmp667 = tmp245 / tmp656
tmp668 = tmp667 * tmp658
tmp669 = tmp327 / tmp656
tmp670 = tmp669 * tmp658
tmp671 = tmp668 + tmp670
tmp672 = tmp671 * tmp663
tmp673 = tmp666 + tmp672
tmp674 = tmp491 / tmp656
tmp675 = tmp674 * tmp658
tmp676 = tmp655 / tmp656
tmp677 = tmp676 * tmp658
tmp678 = tmp675 + tmp677
tmp679 = tmp678 * tmp663
tmp680 = tmp673 + tmp679
tmp681 = tmp409 / tmp656
tmp682 = tmp681 * tmp658
tmp683 = tmp573 / tmp656
tmp684 = tmp683 * tmp658
tmp685 = tmp682 + tmp684
tmp686 = tmp685 * tmp663
tmp687 = tmp680 + tmp686
tmp688 = 100.0
tmp689 = tmp687 * tmp688
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp689, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((), (), torch.float32)
buf14 = buf10
del buf10
buf17 = buf14
del buf14
get_raw_stream(0)
triton_per_fused_add_div_mean_min_mul_0[grid(1)](buf17, arg0_1,
arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf17,
def array2samples_distance(array1, array2):
"""
arguments:
array1: the reference point set, size: (num_point1, num_feature)
array2: the sample point set, size: (num_point2, num_feature)
returns:
distances: scalar tensor, the mean over the samples in array2 of the
squared Euclidean distance to their nearest point in array1
"""
num_point1, _num_features1 = array1.shape
num_point2, num_features2 = array2.shape
expanded_array1 = array1.repeat(num_point2, 1)
expanded_array2 = torch.reshape(torch.unsqueeze(array2, 1).repeat(1,
num_point1, 1), (-1, num_features2))
distances = (expanded_array1 - expanded_array2) * (expanded_array1 -
expanded_array2)
distances = torch.sum(distances, dim=1)
distances = torch.reshape(distances, (num_point2, num_point1))
distances = torch.min(distances, dim=1)[0]
distances = torch.mean(distances)
return distances
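# Minimal usage sketch (illustrative only; the shapes below are assumptions):
#   a = torch.rand(128, 3)            # reference cloud, (num_point1, num_feature)
#   b = torch.rand(64, 3)             # sample cloud,    (num_point2, num_feature)
#   d = array2samples_distance(a, b)  # scalar: mean squared distance b -> nearest in a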
def chamfer_distance_numpy(array1, array2):
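# Despite the "_numpy" suffix, this function operates on torch tensors.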
batch_size, _num_point, _num_features = array1.shape
dist = 0
for i in range(batch_size):
av_dist1 = array2samples_distance(array1[i], array2[i])
av_dist2 = array2samples_distance(array2[i], array1[i])
dist = dist + (0.5 * av_dist1 + 0.5 * av_dist2) / batch_size
return dist * 100
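# Per batch element i this is the symmetric (squared) chamfer distance
#   CD_i = 0.5 * mean_j min_k ||a1[i,k] - a2[i,j]||^2
#        + 0.5 * mean_j min_k ||a2[i,k] - a1[i,j]||^2
# and the return value is 100 * mean_i CD_i -- exactly the arithmetic the
# fused Triton kernel above performs (its 0.25 factor is 1 / batch_size).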
class PointLossNew(nn.Module):
def __init__(self):
super(PointLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| wendydidi/MISO-PCN | PointLoss | false | 4,544 | ["MIT"] | 0 | fdb8ed80d16ed5d019c3ca85e26ce23884067c0d | https://github.com/wendydidi/MISO-PCN/tree/fdb8ed80d16ed5d019c3ca85e26ce23884067c0d | import torch
import torch.nn as nn
def array2samples_distance(array1, array2):
"""
arguments:
array1: the reference point set, size: (num_point1, num_feature)
array2: the sample point set, size: (num_point2, num_feature)
returns:
distances: scalar tensor, the mean over the samples in array2 of the
squared Euclidean distance to their nearest point in array1
"""
num_point1, _num_features1 = array1.shape
num_point2, num_features2 = array2.shape
expanded_array1 = array1.repeat(num_point2, 1)
expanded_array2 = torch.reshape(torch.unsqueeze(array2, 1).repeat(1,
num_point1, 1), (-1, num_features2))
distances = (expanded_array1 - expanded_array2) * (expanded_array1 -
expanded_array2)
distances = torch.sum(distances, dim=1)
distances = torch.reshape(distances, (num_point2, num_point1))
distances = torch.min(distances, dim=1)[0]
distances = torch.mean(distances)
return distances
def chamfer_distance_numpy(array1, array2):
batch_size, _num_point, _num_features = array1.shape
dist = 0
for i in range(batch_size):
av_dist1 = array2samples_distance(array1[i], array2[i])
av_dist2 = array2samples_distance(array2[i], array1[i])
dist = dist + (0.5 * av_dist1 + 0.5 * av_dist2) / batch_size
return dist * 100
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, array1, array2):
return chamfer_distance_numpy(array1, array2)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return []
|
Dueling_DQN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jo/cjoedcmy7gkzotfopp7atueg5hlb65rgyaj7o2wkgwedzdx5r26r.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_2), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tj/ctjbdubzq27k64ogf43asej6474qk3zscfyidqrh5oszfb7rplqn.py
# Topologically Sorted Source Nodes: [a_mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# a_mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [1]), kwargs = {})
triton_poi_fused_mean_1 = async_compile.triton('triton_poi_fused_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], 0, tl.int64)
tmp1 = tmp0 >= tmp0
tmp2 = tl.full([1], 1, tl.int64)
tmp3 = tmp0 < tmp2
tmp4 = tl.load(in_ptr0 + (4*x0), tmp3 & xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tmp0 >= tmp2
tmp6 = tl.full([1], 2, tl.int64)
tmp7 = tmp0 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), tmp8 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tmp0 >= tmp6
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + (2 + (4*x0)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp0 >= tmp11
tmp16 = tl.full([1], 4, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tl.load(in_ptr0 + (3 + (4*x0)), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = tl.where(tmp13, tmp14, tmp18)
tmp20 = tl.where(tmp8, tmp9, tmp19)
tmp21 = tl.where(tmp3, tmp4, tmp20)
tmp22 = tmp2 >= tmp0
tmp23 = tmp2 < tmp2
tmp24 = tl.load(in_ptr0 + (4*x0), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp2 >= tmp2
tmp26 = tmp2 < tmp6
tmp27 = tmp25 & tmp26
tmp28 = tl.load(in_ptr0 + (1 + (4*x0)), tmp27 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp2 >= tmp6
tmp30 = tmp2 < tmp11
tmp31 = tmp29 & tmp30
tmp32 = tl.load(in_ptr0 + (2 + (4*x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tmp2 >= tmp11
tmp34 = tmp2 < tmp16
tmp35 = tl.load(in_ptr0 + (3 + (4*x0)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.where(tmp31, tmp32, tmp35)
tmp37 = tl.where(tmp27, tmp28, tmp36)
tmp38 = tl.where(tmp23, tmp24, tmp37)
tmp39 = tmp21 + tmp38
tmp40 = tmp6 >= tmp0
tmp41 = tmp6 < tmp2
tmp42 = tl.load(in_ptr0 + (4*x0), tmp41 & xmask, eviction_policy='evict_last', other=0.0)
tmp43 = tmp6 >= tmp2
tmp44 = tmp6 < tmp6
tmp45 = tmp43 & tmp44
tmp46 = tl.load(in_ptr0 + (1 + (4*x0)), tmp45 & xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tmp6 >= tmp6
tmp48 = tmp6 < tmp11
tmp49 = tmp47 & tmp48
tmp50 = tl.load(in_ptr0 + (2 + (4*x0)), tmp49 & xmask, eviction_policy='evict_last', other=0.0)
tmp51 = tmp6 >= tmp11
tmp52 = tmp6 < tmp16
tmp53 = tl.load(in_ptr0 + (3 + (4*x0)), tmp51 & xmask, eviction_policy='evict_last', other=0.0)
tmp54 = tl.where(tmp49, tmp50, tmp53)
tmp55 = tl.where(tmp45, tmp46, tmp54)
tmp56 = tl.where(tmp41, tmp42, tmp55)
tmp57 = tmp39 + tmp56
tmp58 = tmp11 >= tmp0
tmp59 = tmp11 < tmp2
tmp60 = tl.load(in_ptr0 + (4*x0), tmp59 & xmask, eviction_policy='evict_last', other=0.0)
tmp61 = tmp11 >= tmp2
tmp62 = tmp11 < tmp6
tmp63 = tmp61 & tmp62
tmp64 = tl.load(in_ptr0 + (1 + (4*x0)), tmp63 & xmask, eviction_policy='evict_last', other=0.0)
tmp65 = tmp11 >= tmp6
tmp66 = tmp11 < tmp11
tmp67 = tmp65 & tmp66
tmp68 = tl.load(in_ptr0 + (2 + (4*x0)), tmp67 & xmask, eviction_policy='evict_last', other=0.0)
tmp69 = tmp11 >= tmp11
tmp70 = tmp11 < tmp16
tmp71 = tl.load(in_ptr0 + (3 + (4*x0)), tmp69 & xmask, eviction_policy='evict_last', other=0.0)
tmp72 = tl.where(tmp67, tmp68, tmp71)
tmp73 = tl.where(tmp63, tmp64, tmp72)
tmp74 = tl.where(tmp59, tmp60, tmp73)
tmp75 = tmp57 + tmp74
tmp76 = 4.0
tmp77 = tmp75 / tmp76
tl.store(out_ptr0 + (x0), tmp77, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pb/cpbcy5pyu4zhpzfxf7a6xu2ndk6ilijodflhglzty5licqmyy3a3.py
# Topologically Sorted Source Nodes: [repeat, add, repeat_1, x_2], Original ATen: [aten.repeat, aten.add, aten.sub]
# Source node to ATen node mapping:
# add => add
# repeat => repeat
# repeat_1 => repeat_1
# x_2 => sub
# Graph fragment:
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%addmm_2, [1, 4]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%repeat, %addmm_3), kwargs = {})
# %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%mean, [1, 4]), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %repeat_1), kwargs = {})
triton_poi_fused_add_repeat_sub_2 = async_compile.triton('triton_poi_fused_add_repeat_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_repeat_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_repeat_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 16, grid=grid(16), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf7)
del primals_7
buf8 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [a_mean], Original ATen: [aten.mean]
triton_poi_fused_mean_1.run(buf8, buf9, 4, grid=grid(4), stream=stream0)
buf10 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [repeat, add, repeat_1, x_2], Original ATen: [aten.repeat, aten.add, aten.sub]
triton_poi_fused_add_repeat_sub_2.run(buf10, buf7, buf9, 16, grid=grid(16), stream=stream0)
del buf7
del buf9
return (buf10, primals_3, buf1, buf2, buf4, buf5, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class Dueling_DQN(nn.Module):
def __init__(self, args):
super().__init__()
self.state_space = args.state_space
self.fc1 = nn.Linear(self.state_space, args.hidden_size)
self.action_space = args.action_space
self.fc_h = nn.Linear(args.hidden_size, args.hidden_size)
self.fc_z_v = nn.Linear(args.hidden_size, 1)
self.fc_z_a = nn.Linear(args.hidden_size, self.action_space)
def forward(self, x):
x = F.leaky_relu(self.fc1(x))
x = F.leaky_relu(self.fc_h(x))
v, a = self.fc_z_v(x), self.fc_z_a(x)
a_mean = torch.stack(a.chunk(self.action_space, 1), 1).mean(1)
x = v.repeat(1, self.action_space) + a - a_mean.repeat(1, self.
action_space)
return x
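# Dueling aggregation in its standard identifiable form:
#   Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a')
# Subtracting the mean advantage pins the value/advantage split down, and
# repeat() broadcasts the (batch, 1) value and advantage mean over actions.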
def parameter_update(self, source):
self.load_state_dict(source.state_dict())
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config(state_space=4, hidden_size=4,
action_space=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
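# Fused bias-add + LeakyReLU(negative_slope=0.01): out_ptr0 receives the
# (x > 0) mask that the backward pass can reuse, out_ptr1 the activations.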
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
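# Computes torch.stack(a.chunk(4, 1), 1).mean(1) for the 4x4 advantage matrix:
# the chains of >=/< selects below are the traced chunk-indexing, which here
# simply picks row entries 0..3 of in_ptr0 before dividing their sum by 4.0.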
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], 0, tl.int64)
tmp2 = tl.full([1], 1, tl.int64)
tmp3 = tmp0 < tmp2
tmp4 = tl.load(in_ptr0 + 4 * x0, tmp3 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp5 = tmp0 >= tmp2
tmp6 = tl.full([1], 2, tl.int64)
tmp7 = tmp0 < tmp6
tmp8 = tmp5 & tmp7
tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), tmp8 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tmp0 >= tmp6
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + (2 + 4 * x0), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp0 >= tmp11
tl.full([1], 4, tl.int64)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), tmp15 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp19 = tl.where(tmp13, tmp14, tmp18)
tmp20 = tl.where(tmp8, tmp9, tmp19)
tmp21 = tl.where(tmp3, tmp4, tmp20)
tmp23 = tmp2 < tmp2
tmp24 = tl.load(in_ptr0 + 4 * x0, tmp23 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp2 >= tmp2
tmp26 = tmp2 < tmp6
tmp27 = tmp25 & tmp26
tmp28 = tl.load(in_ptr0 + (1 + 4 * x0), tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tmp2 >= tmp6
tmp30 = tmp2 < tmp11
tmp31 = tmp29 & tmp30
tmp32 = tl.load(in_ptr0 + (2 + 4 * x0), tmp31 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp2 >= tmp11
tmp35 = tl.load(in_ptr0 + (3 + 4 * x0), tmp33 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp36 = tl.where(tmp31, tmp32, tmp35)
tmp37 = tl.where(tmp27, tmp28, tmp36)
tmp38 = tl.where(tmp23, tmp24, tmp37)
tmp39 = tmp21 + tmp38
tmp41 = tmp6 < tmp2
tmp42 = tl.load(in_ptr0 + 4 * x0, tmp41 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp43 = tmp6 >= tmp2
tmp44 = tmp6 < tmp6
tmp45 = tmp43 & tmp44
tmp46 = tl.load(in_ptr0 + (1 + 4 * x0), tmp45 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp47 = tmp6 >= tmp6
tmp48 = tmp6 < tmp11
tmp49 = tmp47 & tmp48
tmp50 = tl.load(in_ptr0 + (2 + 4 * x0), tmp49 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp51 = tmp6 >= tmp11
tmp53 = tl.load(in_ptr0 + (3 + 4 * x0), tmp51 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp54 = tl.where(tmp49, tmp50, tmp53)
tmp55 = tl.where(tmp45, tmp46, tmp54)
tmp56 = tl.where(tmp41, tmp42, tmp55)
tmp57 = tmp39 + tmp56
tmp59 = tmp11 < tmp2
tmp60 = tl.load(in_ptr0 + 4 * x0, tmp59 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp61 = tmp11 >= tmp2
tmp62 = tmp11 < tmp6
tmp63 = tmp61 & tmp62
tmp64 = tl.load(in_ptr0 + (1 + 4 * x0), tmp63 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp65 = tmp11 >= tmp6
tmp66 = tmp11 < tmp11
tmp67 = tmp65 & tmp66
tmp68 = tl.load(in_ptr0 + (2 + 4 * x0), tmp67 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp69 = tmp11 >= tmp11
tmp71 = tl.load(in_ptr0 + (3 + 4 * x0), tmp69 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp72 = tl.where(tmp67, tmp68, tmp71)
tmp73 = tl.where(tmp63, tmp64, tmp72)
tmp74 = tl.where(tmp59, tmp60, tmp73)
tmp75 = tmp57 + tmp74
tmp76 = 4.0
tmp77 = tmp75 / tmp76
tl.store(out_ptr0 + x0, tmp77, xmask)
@triton.jit
def triton_poi_fused_add_repeat_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
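# Elementwise x = v.repeat(1, 4) + a - a_mean.repeat(1, 4): in_out_ptr0 holds
# the advantages a (updated in place), in_ptr0 the per-row value v, and
# in_ptr1 the per-row advantage mean.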
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(16)](buf0, primals_2, buf1, buf2,
16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf3 = buf0
del buf0
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(16)](buf3, primals_5, buf4, buf5,
16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf7)
del primals_7
buf8 = buf3
del buf3
extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_mean_1[grid(4)](buf8, buf9, 4, XBLOCK=4, num_warps
=1, num_stages=1)
buf10 = buf8
del buf8
triton_poi_fused_add_repeat_sub_2[grid(16)](buf10, buf7, buf9, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf7
del buf9
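# Alongside the dueling output (buf10), the remaining returns are the masks,
# intermediate activations, and weights that the compiled backward can reuse.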
return (buf10, primals_3, buf1, buf2, buf4, buf5, primals_8, primals_6,
primals_4)
class Dueling_DQNNew(nn.Module):
def __init__(self, args):
super().__init__()
self.state_space = args.state_space
self.fc1 = nn.Linear(self.state_space, args.hidden_size)
self.action_space = args.action_space
self.fc_h = nn.Linear(args.hidden_size, args.hidden_size)
self.fc_z_v = nn.Linear(args.hidden_size, 1)
self.fc_z_a = nn.Linear(args.hidden_size, self.action_space)
def parameter_update(self, source):
self.load_state_dict(source.state_dict())
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_3 = self.fc_h.weight
primals_5 = self.fc_h.bias
primals_6 = self.fc_z_v.weight
primals_7 = self.fc_z_v.bias
primals_4 = self.fc_z_a.weight
primals_9 = self.fc_z_a.bias
primals_8 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| wotmd5731/pseudo_random_gen | Dueling_DQN | false | 4,545 | ["MIT"] | 0 | f79810cd5ac79afe0a73dee73aa21bd8c01aeb9b | https://github.com/wotmd5731/pseudo_random_gen/tree/f79810cd5ac79afe0a73dee73aa21bd8c01aeb9b | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, args):
super().__init__()
self.state_space = args.state_space
self.fc1 = nn.Linear(self.state_space, args.hidden_size)
self.action_space = args.action_space
self.fc_h = nn.Linear(args.hidden_size, args.hidden_size)
self.fc_z_v = nn.Linear(args.hidden_size, 1)
self.fc_z_a = nn.Linear(args.hidden_size, self.action_space)
def forward(self, x):
x = F.leaky_relu(self.fc1(x))
x = F.leaky_relu(self.fc_h(x))
v, a = self.fc_z_v(x), self.fc_z_a(x)
a_mean = torch.stack(a.chunk(self.action_space, 1), 1).mean(1)
x = v.repeat(1, self.action_space) + a - a_mean.repeat(1, self.
action_space)
return x
def parameter_update(self, source):
self.load_state_dict(source.state_dict())
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config(state_space=4, hidden_size=4,
action_space=4)}]
|
DQN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/do/cdo22no4lmipk7byduyah2xsadvdcbfr22puoptl5br3l66r6jra.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 256, grid=grid(256), stream=stream0)
del primals_5
buf6 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_0.run(buf6, primals_7, buf7, buf8, 256, grid=grid(256), stream=stream0)
del buf6
del primals_7
return (buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), buf7, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class DQN(nn.Module):
def __init__(self, args):
super().__init__()
self.state_space = args.state_space
self.fc1 = nn.Linear(self.state_space, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, args.action_space)
def forward(self, x):
x = F.leaky_relu(self.fc1(x))
x = F.leaky_relu(self.fc2(x))
x = F.leaky_relu(self.fc3(x))
return x
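# Note: the output layer is also passed through leaky_relu, so negative
# Q-values come out scaled by the 0.01 slope rather than unchanged.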
def parameter_update(self, source):
self.load_state_dict(source.state_dict())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config(state_space=4, hidden_size=4,
action_space=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
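# Same fused bias-add + LeakyReLU(0.01) pattern as in the Dueling_DQN entry;
# call() below launches it once per Linear layer on the flattened (64, 4) view.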
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(256)](buf3, primals_5, buf4,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf6 = buf3
del buf3
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(256)](buf6, primals_7, buf7,
buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del primals_7
return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0
), buf7, primals_6, primals_4
class DQNNew(nn.Module):
def __init__(self, args):
super().__init__()
self.state_space = args.state_space
self.fc1 = nn.Linear(self.state_space, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, args.action_space)
def parameter_update(self, source):
self.load_state_dict(source.state_dict())
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
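# Hypothetical usage sketch (names mirror get_init_inputs above; assumes a CUDA
# device, since call() pins all buffers to cuda:0):
#   cfg = _mock_config(state_space=4, hidden_size=4, action_space=4)
#   model = DQNNew(cfg).cuda()
#   q = model(torch.rand(4, 4, 4, 4, device='cuda'))  # shape (4, 4, 4, 4)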
 | wotmd5731/pseudo_random_gen | DQN | false | 4547 | [
"MIT"
] | 0 | f79810cd5ac79afe0a73dee73aa21bd8c01aeb9b | https://github.com/wotmd5731/pseudo_random_gen/tree/f79810cd5ac79afe0a73dee73aa21bd8c01aeb9b | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, args):
super().__init__()
self.state_space = args.state_space
self.fc1 = nn.Linear(self.state_space, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, args.action_space)
def forward(self, x):
x = F.leaky_relu(self.fc1(x))
x = F.leaky_relu(self.fc2(x))
x = F.leaky_relu(self.fc3(x))
return x
def parameter_update(self, source):
self.load_state_dict(source.state_dict())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config(state_space=4, hidden_size=4,
action_space=4)}]
|
NearestNeighbourx4 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/oj/cojl5mb3pzv5jbmfzjkbac5hekbmpvb72kof6ouyyasitrogdd6n.py
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._unsafe_index]
# Source node to ATen node mapping:
# interpolate => _unsafe_index
# Graph fragment:
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4), tmp9, xmask)
''', device_str='cuda')
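# The kernel above implements nearest-neighbour x2 upsampling by integer index
# arithmetic: output pixel (x1, x0) reads input pixel (x1 // 2, x0 // 2). A
# rough eager equivalent (illustrative only) is
#   F.interpolate(x, scale_factor=2, mode='nearest')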
# kernel path: runs/run_shard_7/inductor_cache/ff/cffwl3zagtjkvyvucvinhtofmu64rva3talauieyyjk6kx3f52zd.py
# Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# interpolate_1 => add_4, add_5, convert_element_type_4, convert_element_type_5, iota_2, mul_4, mul_5
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.float32), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.5), kwargs = {})
# %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_1 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cr/ccry5ckiw6zpzk4uzmrobiloz2xqkr5gljofkrlbaof3htriaqog.py
# Topologically Sorted Source Nodes: [conv2d, x, interpolate_1], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
# Source node to ATen node mapping:
# conv2d => convolution
# interpolate_1 => _unsafe_index_1
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {})
triton_poi_fused__unsafe_index_convolution_relu_2 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x5 = (xindex // 256)
x2 = (xindex // 256) % 4
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (8*tmp4) + (64*x5)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x6), tmp13, None)
''', device_str='cuda')
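# The kernel above fuses three steps: gather the x2-upsampled feature map via
# the precomputed index vector (in_ptr0), add the conv bias (in_ptr2), and
# apply ReLU. It evaluates relu(conv0(...) + b) lazily at each upsampled
# position, so the 8x8 ReLU result is never materialised as a separate buffer.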
# kernel path: runs/run_shard_7/inductor_cache/2p/c2pnthly5nft2sfk7syx72jkh52wpxtmpvwe37syru7idzd2awrg.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tl/ctlk5m5qxd2wn3mbr4w26mcqnctg6whek6eeyzf4ciha5kx2fcs7.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/al/calfzo6phh7doawgewddfqpxxumcxvvzg2uaxzcizya2cjyj2mf7.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
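# Note: the two *_threshold_backward kernels above do not run a backward pass;
# they only precompute the boolean masks (relu_output <= 0) that autograd's
# threshold_backward will consume, which is why call() returns buf8 and buf9
# alongside the forward result.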
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1))
buf2 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_1.run(buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x, interpolate_1], Original ATen: [aten.convolution, aten.relu, aten._unsafe_index]
triton_poi_fused__unsafe_index_convolution_relu_2.run(buf2, buf1, primals_3, buf3, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 16, 16), (1024, 256, 16, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf5, primals_5, 4096, grid=grid(4096), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 16, 16), (1024, 256, 16, 1))
buf7 = buf6; del buf6 # reuse
buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_4.run(buf7, primals_7, buf8, 4096, grid=grid(4096), stream=stream0)
del primals_7
buf9 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_5.run(buf1, primals_3, buf9, 1024, grid=grid(1024), stream=stream0)
del buf1
del primals_3
return (buf7, primals_2, primals_4, primals_6, buf0, buf2, buf3, buf5, buf8, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class NearestNeighbourx4(nn.Module):
def __init__(self, nf, bias, custom_init=False):
super(NearestNeighbourx4, self).__init__()
self.conv0 = nn.Conv2d(nf, nf, 3, 1, 1, bias=bias)
self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=bias)
self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=bias)
self.relu = nn.ReLU(inplace=True)
if custom_init:
for conv in [self.conv0, self.conv1, self.conv2]:
torch.nn.init.kaiming_normal_(conv.weight)
def forward(self, x):
x = self.relu(self.conv0(F.interpolate(x, scale_factor=2, mode=
'nearest')))
x = self.relu(self.conv1(F.interpolate(x, scale_factor=2, mode=
'nearest')))
x = self.relu(self.conv2(x))
return x
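# Shape sketch (illustrative): with nf=4 and a (4, 4, 4, 4) input, the two
# nearest x2 upsamples give 4 -> 8 -> 16 spatially, so the output is
# (4, 4, 16, 16). Note that get_init_inputs() passes bias=4; nn.Conv2d treats
# any truthy value as bias=True, so the convolutions do carry bias terms.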
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nf': 4, 'bias': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_convolution_relu_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x5 = xindex // 256
x2 = xindex // 256 % 4
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x5), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x6, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, xmask)
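# Eager-mode reference for the mask kernel above (illustrative): given conv
# output y and bias b, it stores only
#   mask = (F.relu(y + b) <= 0)
# The forward activation was already consumed lazily by the fused upsample
# kernel, so no ReLU tensor needs to be written out here.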
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1))
buf2 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_1[grid(16)](buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_poi_fused__unsafe_index_convolution_relu_2[grid(4096)](buf2,
buf1, primals_3, buf3, 4096, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 16, 16), (1024, 256, 16, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_3[grid(4096)](buf5, primals_5,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 16, 16), (1024, 256, 16, 1))
buf7 = buf6
del buf6
buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_4[grid(4096)](buf7
, primals_7, buf8, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf9 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(1024)](buf1
, primals_3, buf9, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_3
return (buf7, primals_2, primals_4, primals_6, buf0, buf2, buf3, buf5,
buf8, buf9)
class NearestNeighbourx4New(nn.Module):
def __init__(self, nf, bias, custom_init=False):
super(NearestNeighbourx4New, self).__init__()
self.conv0 = nn.Conv2d(nf, nf, 3, 1, 1, bias=bias)
self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=bias)
self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=bias)
self.relu = nn.ReLU(inplace=True)
if custom_init:
for conv in [self.conv0, self.conv1, self.conv2]:
torch.nn.init.kaiming_normal_(conv.weight)
def forward(self, input_0):
primals_2 = self.conv0.weight
primals_3 = self.conv0.bias
primals_4 = self.conv1.weight
primals_5 = self.conv1.bias
primals_6 = self.conv2.weight
primals_7 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
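# Hypothetical usage sketch (assumes a CUDA device, matching call()'s cuda:0
# pinning):
#   m = NearestNeighbourx4New(nf=4, bias=True).cuda()
#   y = m(torch.rand(4, 4, 4, 4, device='cuda'))  # -> (4, 4, 16, 16)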
 | wsdea/EfficientSR | NearestNeighbourx4 | false | 4548 | [
"MIT"
] | 0 | 077dea18c90e0d5bed722c609a776033c09f80e6 | https://github.com/wsdea/EfficientSR/tree/077dea18c90e0d5bed722c609a776033c09f80e6 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, nf, bias, custom_init=False):
super().__init__()
self.conv0 = nn.Conv2d(nf, nf, 3, 1, 1, bias=bias)
self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=bias)
self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=bias)
self.relu = nn.ReLU(inplace=True)
if custom_init:
for conv in [self.conv0, self.conv1, self.conv2]:
torch.nn.init.kaiming_normal_(conv.weight)
def forward(self, x):
x = self.relu(self.conv0(F.interpolate(x, scale_factor=2, mode=
'nearest')))
x = self.relu(self.conv1(F.interpolate(x, scale_factor=2, mode=
'nearest')))
x = self.relu(self.conv2(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Synthesis_prior_net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jq/cjq3glnqsfrmyvnxs4je5xfalsletumhjebstaksuh76ybonp2sj.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 36864
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = (yindex // 192)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (192*x2) + (4800*y1)), tmp0, xmask)
''', device_str='cuda')
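# The copy kernel above only changes memory layout: it repacks the contiguous
# (192, 192, 5, 5) deconv weight (strides (4800, 25, 5, 1)) into a
# channels-last-like layout (strides (4800, 1, 960, 192)) so the external
# convolution kernel can read it efficiently; no values are transformed.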
# kernel path: runs/run_shard_7/inductor_cache/f5/cf5odkubd5jeatzcgavikebgjzlefandddwsk4keigymtzmgzkg3.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 768
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = (yindex // 192)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (192*x2) + (3072*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/he/che2ykopu3v3q6cki4iubgqudhexfrp2tyt546xcxrhjpk52k6ix.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 36864
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = (yindex // 192)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (192*x2) + (1728*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ho/chofjocabfe3jv65vyovskegj3iqlti6m6uay2fzbrgxkuehqtuw.py
# Topologically Sorted Source Nodes: [conv_transpose2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv_transpose2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3t/c3tyjun4kl7r6m6a4h3imqyvfhk5efzvheatbgtfod4kw2pj7fdi.py
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv_transpose2d_1 => convolution_1
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 196608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gj/cgje73ekl3bnibuaunf2yk3q4mqu5245fw6vdbqzniqvq3jycquv.py
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv_transpose2d_2 => convolution_2
# x_2 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.01), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 192
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 256
y3 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x1 + (192*y0)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x1 + (192*y0)), tmp4, xmask)
tl.store(out_ptr1 + (y2 + (256*x1) + (49152*y3)), tmp7, xmask)
''', device_str='cuda')
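# Kernel 5 above differs from kernels 3/4 only in its epilogue layout: the
# boolean mask (out_ptr0) stays channels-last, while the leaky-relu activation
# (out_ptr1) is scattered back into contiguous NCHW order, matching the
# (49152, 256, 16, 1) strides the caller expects for the final output buf12.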
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_2, (192, ), (1, ))
assert_size_stride(primals_3, (4, 192, 4, 4), (3072, 16, 4, 1))
assert_size_stride(primals_4, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_5, (192, ), (1, ))
assert_size_stride(primals_6, (192, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_7, (192, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 36864, 25, grid=grid(36864, 25), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 768, 16, grid=grid(768, 16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(primals_4, buf2, 36864, 25, grid=grid(36864, 25), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((192, 192, 3, 3), (1728, 1, 576, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_6, buf3, 36864, 9, grid=grid(36864, 9), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf4, (4, 192, 8, 8), (12288, 1, 1536, 192))
buf5 = empty_strided_cuda((4, 192, 8, 8), (12288, 1, 1536, 192), torch.bool)
buf6 = empty_strided_cuda((4, 192, 8, 8), (12288, 1, 1536, 192), torch.float32)
# Topologically Sorted Source Nodes: [conv_transpose2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_3.run(buf4, primals_2, buf5, buf6, 49152, grid=grid(49152), stream=stream0)
del buf4
del primals_2
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf7, (4, 192, 16, 16), (49152, 1, 3072, 192))
buf8 = empty_strided_cuda((4, 192, 16, 16), (49152, 1, 3072, 192), torch.bool)
buf9 = empty_strided_cuda((4, 192, 16, 16), (49152, 1, 3072, 192), torch.float32)
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf7, primals_5, buf8, buf9, 196608, grid=grid(196608), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 192, 16, 16), (49152, 1, 3072, 192))
buf11 = empty_strided_cuda((4, 192, 16, 16), (49152, 1, 3072, 192), torch.bool)
buf12 = reinterpret_tensor(buf7, (4, 192, 16, 16), (49152, 256, 16, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_5.run(buf10, primals_7, buf11, buf12, 1024, 192, grid=grid(1024, 192), stream=stream0)
del buf10
del primals_7
return (buf12, buf0, buf1, buf2, buf3, buf5, buf6, buf8, buf9, buf11, )
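# Spatial-size sketch for the call above (ConvTranspose2d formula:
# H_out = (H_in - 1) * stride - 2 * padding + kernel + output_padding):
#   deconv1: (4 - 1) * 2 - 4 + 5 + 1 = 8   -> (4, 192, 8, 8)
#   deconv2: (8 - 1) * 2 - 4 + 5 + 1 = 16  -> (4, 192, 16, 16)
#   deconv3: stride 1, pad 1, k=3 keeps 16x16 -> (4, 192, 16, 16)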
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((192, 192, 5, 5), (4800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 192, 4, 4), (3072, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((192, 192, 5, 5), (4800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((192, 192, 3, 3), (1728, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.utils.data
class Synthesis_prior_net(nn.Module):
"""
Decode synthesis prior
"""
def __init__(self, out_channel_N=192, out_channel_M=320):
super(Synthesis_prior_net, self).__init__()
self.deconv1 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv1.weight.data, math.sqrt(2 * 1)
)
torch.nn.init.constant_(self.deconv1.bias.data, 0.01)
self.relu1 = nn.LeakyReLU()
self.deconv2 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv2.weight.data, math.sqrt(2 * 1)
)
torch.nn.init.constant_(self.deconv2.bias.data, 0.01)
self.relu2 = nn.LeakyReLU()
self.deconv3 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 3,
stride=1, padding=1)
        torch.nn.init.xavier_normal_(self.deconv3.weight.data, math.sqrt(2 * 1))
torch.nn.init.constant_(self.deconv3.bias.data, 0.01)
self.relu3 = nn.LeakyReLU()
def forward(self, x):
x = self.relu1(self.deconv1(x))
x = self.relu2(self.deconv2(x))
x = self.relu3(self.deconv3(x))
return x
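# A minimal shape trace (an assumption matching get_inputs below, with
# out_channel_N=192): each stride-2 deconv with output_padding=1 doubles the
# spatial size and the final 3x3 deconv preserves it, so
#   net = Synthesis_prior_net()
#   net(torch.rand(4, 192, 4, 4)).shape == (4, 192, 16, 16)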
def get_inputs():
return [torch.rand([4, 192, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
                       XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 192 * x2 + 4800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
                       XBLOCK: tl.constexpr):
ynumel = 768
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
    tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask,
                   eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 192 * x2 + 3072 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr,
                       XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 192 * x2 + 1728 * y1), tmp0, xmask)
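# The three copy kernels above are pure layout permutations: they repack the
# OIHW deconv weights and the NCHW input into a channels-last layout (channel
# stride 1) before the transposed convolutions in call() below. A rough
# eager-mode equivalent (an assumption, for 4D tensors):
#   buf = x.contiguous(memory_format=torch.channels_last)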
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
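# Reference semantics for the fused bias + LeakyReLU kernels in this file (a
# sketch, not part of the generated module): negative_slope is 0.01, the
# nn.LeakyReLU default, and the boolean mask (x + b > 0) is stored alongside
# the activation so the backward pass can reuse it.
def _leaky_relu_bias_reference(x, b, negative_slope=0.01):
    # x: (N, C, H, W) activations, b: (C,) convolution bias
    y = x + b.view(1, -1, 1, 1)
    mask = y > 0
    return mask, torch.where(mask, y, y * negative_slope)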
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 192
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 256
y3 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x1 + 192 * y0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x1 + 192 * y0), tmp4, xmask)
tl.store(out_ptr1 + (y2 + 256 * x1 + 49152 * y3), tmp7, xmask)
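# Unlike the two kernels above, this one writes its outputs in two different
# layouts: the boolean mask keeps the channels-last strides used inside
# call() (x1 + 192 * y0), while the activation is scattered back to
# contiguous NCHW order (y2 + 256 * x1 + 49152 * y3) so the reinterpreted
# buffer can be returned directly as the module output.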
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_2, (192,), (1,))
assert_size_stride(primals_3, (4, 192, 4, 4), (3072, 16, 4, 1))
assert_size_stride(primals_4, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_5, (192,), (1,))
assert_size_stride(primals_6, (192, 192, 3, 3), (1728, 9, 3, 1))
assert_size_stride(primals_7, (192,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(36864, 25)](primals_1, buf0, 36864, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 192, 4, 4), (3072, 1, 768, 192),
torch.float32)
        triton_poi_fused_1[grid(768, 16)](primals_3, buf1, 768, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192),
torch.float32)
triton_poi_fused_0[grid(36864, 25)](primals_4, buf2, 36864, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((192, 192, 3, 3), (1728, 1, 576, 192),
torch.float32)
triton_poi_fused_2[grid(36864, 9)](primals_6, buf3, 36864, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf4, (4, 192, 8, 8), (12288, 1, 1536, 192))
buf5 = empty_strided_cuda((4, 192, 8, 8), (12288, 1, 1536, 192),
torch.bool)
buf6 = empty_strided_cuda((4, 192, 8, 8), (12288, 1, 1536, 192),
torch.float32)
        triton_poi_fused_convolution_leaky_relu_3[grid(49152)](buf4,
            primals_2, buf5, buf6, 49152, XBLOCK=512, num_warps=4,
            num_stages=1)
del buf4
del primals_2
buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf7, (4, 192, 16, 16), (49152, 1, 3072, 192))
buf8 = empty_strided_cuda((4, 192, 16, 16), (49152, 1, 3072, 192),
torch.bool)
buf9 = empty_strided_cuda((4, 192, 16, 16), (49152, 1, 3072, 192),
torch.float32)
triton_poi_fused_convolution_leaky_relu_4[grid(196608)](buf7,
primals_5, buf8, buf9, 196608, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_5
buf10 = extern_kernels.convolution(buf9, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 192, 16, 16), (49152, 1, 3072, 192))
buf11 = empty_strided_cuda((4, 192, 16, 16), (49152, 1, 3072, 192),
torch.bool)
buf12 = reinterpret_tensor(buf7, (4, 192, 16, 16), (49152, 256, 16,
1), 0)
del buf7
triton_poi_fused_convolution_leaky_relu_5[grid(1024, 192)](buf10,
primals_7, buf11, buf12, 1024, 192, XBLOCK=64, YBLOCK=64,
num_warps=8, num_stages=1)
del buf10
del primals_7
return buf12, buf0, buf1, buf2, buf3, buf5, buf6, buf8, buf9, buf11
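# Note: call() returns the network output (buf12) first; the repacked weights
# and the LeakyReLU masks (buf5, buf8, buf11) that follow are the tensors the
# generated backward graph expects to have saved.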
class Synthesis_prior_netNew(nn.Module):
"""
    Decode the synthesis prior.
"""
def __init__(self, out_channel_N=192, out_channel_M=320):
super(Synthesis_prior_netNew, self).__init__()
self.deconv1 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
        torch.nn.init.xavier_normal_(self.deconv1.weight.data, math.sqrt(2 * 1))
torch.nn.init.constant_(self.deconv1.bias.data, 0.01)
self.relu1 = nn.LeakyReLU()
self.deconv2 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
        torch.nn.init.xavier_normal_(self.deconv2.weight.data, math.sqrt(2 * 1))
torch.nn.init.constant_(self.deconv2.bias.data, 0.01)
self.relu2 = nn.LeakyReLU()
self.deconv3 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 3,
stride=1, padding=1)
        torch.nn.init.xavier_normal_(self.deconv3.weight.data, math.sqrt(2 * 1))
torch.nn.init.constant_(self.deconv3.bias.data, 0.01)
self.relu3 = nn.LeakyReLU()
def forward(self, input_0):
primals_1 = self.deconv1.weight
primals_2 = self.deconv1.bias
primals_4 = self.deconv2.weight
primals_5 = self.deconv2.bias
primals_6 = self.deconv3.weight
primals_7 = self.deconv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| wemozj/Image-Compression-based-GMM-and-Attention-Module | Synthesis_prior_net | false | 4,549 | [
"Apache-2.0"
] | 0 | 93f804dbcea8ffc1621456f3d104d0342c75373b | https://github.com/wemozj/Image-Compression-based-GMM-and-Attention-Module/tree/93f804dbcea8ffc1621456f3d104d0342c75373b | import math
import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""
    Decode the synthesis prior.
"""
def __init__(self, out_channel_N=192, out_channel_M=320):
super().__init__()
self.deconv1 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
        torch.nn.init.xavier_normal_(self.deconv1.weight.data, math.sqrt(2 * 1))
torch.nn.init.constant_(self.deconv1.bias.data, 0.01)
self.relu1 = nn.LeakyReLU()
self.deconv2 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
        torch.nn.init.xavier_normal_(self.deconv2.weight.data, math.sqrt(2 * 1))
torch.nn.init.constant_(self.deconv2.bias.data, 0.01)
self.relu2 = nn.LeakyReLU()
self.deconv3 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 3,
stride=1, padding=1)
        torch.nn.init.xavier_normal_(self.deconv3.weight.data, math.sqrt(2 * 1))
torch.nn.init.constant_(self.deconv3.bias.data, 0.01)
self.relu3 = nn.LeakyReLU()
def forward(self, x):
x = self.relu1(self.deconv1(x))
x = self.relu2(self.deconv2(x))
x = self.relu3(self.deconv3(x))
return x
def get_inputs():
return [torch.rand([4, 192, 4, 4])]
def get_init_inputs():
return []
|
baseline_upscale | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zh/czhzsqtvwtkudgjsezbz24b3gbrvyy7q67uhkphrvaz3javkcsdg.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.1), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 8
x4 = (xindex // 64)
x2 = (xindex // 64) % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + ((4*(x1 // 2)) + (16*(x0 % 2)) + (32*(x1 % 2)) + (64*x4) + (x0 // 2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((2*(x1 % 2)) + (4*x2) + (x0 % 2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x5), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/br/cbrdjuihrm6wjlg66ptxkrtypee7by4o5fxuxj5s2vfknpfsbhhx.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.1), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_3, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16) % 16
x4 = (xindex // 256)
x2 = (xindex // 256) % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + ((8*(x1 // 2)) + (64*(x0 % 2)) + (128*(x1 % 2)) + (256*x4) + (x0 // 2)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((2*(x1 % 2)) + (4*x2) + (x0 % 2)), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x5), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wc/cwceyn2mpq5oc4hfti3cgdhiiw74ps4alzhd3xxq7vgrsjhbab4a.py
# Topologically Sorted Source Nodes: [conv2d_2, leaky_relu_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# leaky_relu_2 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.1), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7x/c7xnwtrjfqhdkxhfsdsjlkr7ml5ojqmtd2lrl7npuiczn7woxe2e.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_2 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 256) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (3, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_9, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, 1024, grid=grid(1024), stream=stream0)
del buf0
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 8, 8), (1024, 64, 8, 1))
buf3 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf2, primals_5, buf3, 4096, grid=grid(4096), stream=stream0)
del buf2
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 16, 16), (1024, 256, 16, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, leaky_relu_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf5, primals_7, 4096, grid=grid(4096), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 3, 16, 16), (768, 256, 16, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf7, primals_9, 3072, grid=grid(3072), stream=stream0)
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((3, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.init as init
def initialize_weights(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, torch.nn.Conv2d):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.Linear):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
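# Usage sketch (an assumption): Kaiming/MSRA init followed by a small scale,
# e.g. the 0.1 used for the convs below, keeps the initial layer outputs
# close to zero:
#   conv = nn.Conv2d(4, 16, 3, 1, 1)
#   initialize_weights(conv, scale=0.1)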
class baseline_upscale(nn.Module):
def __init__(self, nf):
super(baseline_upscale, self).__init__()
self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
self.HR_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.last_conv = nn.Conv2d(nf, 3, 3, 1, 1, bias=True)
self.pixel_shuffle = nn.PixelShuffle(2)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        initialize_weights([self.upconv1, self.upconv2, self.HR_conv,
            self.last_conv], 0.1)
def forward(self, x):
x = self.lrelu(self.pixel_shuffle(self.upconv1(x)))
x = self.lrelu(self.pixel_shuffle(self.upconv2(x)))
x = self.last_conv(self.lrelu(self.HR_conv(x)))
return x
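# A minimal shape trace (an assumption matching get_inputs below, nf=4): each
# upconv expands channels 4x and PixelShuffle(2) trades them for a 2x spatial
# upscale, giving 4x overall:
#   net = baseline_upscale(nf=4)
#   net(torch.rand(4, 4, 4, 4)).shape == (4, 3, 16, 16)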
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nf': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x4 = xindex // 64
x2 = xindex // 64 % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x1 // 2) + 16 * (x0 % 2) + 32 * (x1 % 2) +
64 * x4 + x0 // 2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 * (x1 % 2) + 4 * x2 + x0 % 2), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x5, tmp7, xmask)
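# The load index above folds nn.PixelShuffle(2) into the activation kernel:
# output pixel (c, y, x) reads conv-output channel 4*c + 2*(y % 2) + (x % 2)
# at location (y // 2, x // 2), and the bias is gathered with the same
# channel index. A reference sketch (an assumption inferred from the index
# arithmetic, not part of the generated module):
def _shuffle_lrelu_reference(conv_out, bias, slope=0.1):
    y = torch.nn.functional.pixel_shuffle(conv_out + bias.view(1, -1, 1, 1), 2)
    return torch.nn.functional.leaky_relu(y, negative_slope=slope)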
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16 % 16
x4 = xindex // 256
x2 = xindex // 256 % 4
x5 = xindex
    tmp0 = tl.load(in_ptr0 + (8 * (x1 // 2) + 64 * (x0 % 2) + 128 * (x1 % 2) +
        256 * x4 + x0 // 2), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 * (x1 % 2) + 4 * x2 + x0 % 2), None,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x5, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel,
                                   XBLOCK: tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 256 % 3
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (3, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_9, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(1024)](buf0, primals_2, buf1,
1024, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 8, 8), (1024, 64, 8, 1))
        buf3 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1),
            torch.float32)
triton_poi_fused_leaky_relu_1[grid(4096)](buf2, primals_5, buf3,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del buf2
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 16, 16), (1024, 256, 16, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_leaky_relu_2[grid(4096)](buf5,
primals_7, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 3, 16, 16), (768, 256, 16, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_3[grid(3072)](buf7, primals_9, 3072,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf5)
def initialize_weights(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, torch.nn.Conv2d):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.Linear):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
class baseline_upscaleNew(nn.Module):
def __init__(self, nf):
super(baseline_upscaleNew, self).__init__()
self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
self.HR_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.last_conv = nn.Conv2d(nf, 3, 3, 1, 1, bias=True)
self.pixel_shuffle = nn.PixelShuffle(2)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        initialize_weights([self.upconv1, self.upconv2, self.HR_conv,
            self.last_conv], 0.1)
def forward(self, input_0):
primals_1 = self.upconv1.weight
primals_2 = self.upconv1.bias
primals_4 = self.upconv2.weight
primals_5 = self.upconv2.bias
primals_6 = self.HR_conv.weight
primals_7 = self.HR_conv.bias
primals_8 = self.last_conv.weight
primals_9 = self.last_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| wsdea/EfficientSR | baseline_upscale | false | 4,550 | [
"MIT"
] | 0 | 077dea18c90e0d5bed722c609a776033c09f80e6 | https://github.com/wsdea/EfficientSR/tree/077dea18c90e0d5bed722c609a776033c09f80e6 | import torch
import torch.nn as nn
import torch.nn.init as init
def initialize_weights(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, torch.nn.Conv2d):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.Linear):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
class Model(nn.Module):
def __init__(self, nf):
super().__init__()
self.upconv1 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
self.upconv2 = nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True)
self.HR_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.last_conv = nn.Conv2d(nf, 3, 3, 1, 1, bias=True)
self.pixel_shuffle = nn.PixelShuffle(2)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        initialize_weights([self.upconv1, self.upconv2, self.HR_conv,
            self.last_conv], 0.1)
def forward(self, x):
x = self.lrelu(self.pixel_shuffle(self.upconv1(x)))
x = self.lrelu(self.pixel_shuffle(self.upconv2(x)))
x = self.last_conv(self.lrelu(self.HR_conv(x)))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
QueryEncoding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zo/czoio6nsflvecb2qwhe3w75tplfaiyoo6yyqhaoz72bqpuhuhntr.py
# Topologically Sorted Source Nodes: [idx, setitem], Original ATen: [aten._to_copy, aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# idx => full_default
# setitem => copy, full_default_1
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], 1), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select, %full_default_1), kwargs = {})
# %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default, %copy, 1, 0), kwargs = {})
triton_poi_fused__to_copy_fill_lift_fresh_0 = async_compile.triton('triton_poi_fused__to_copy_fill_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_fill_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_fill_lift_fresh_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 0, tl.int64)
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mi/cmizcvmhsff6tf5i4xjvyisvdrmrwptxodpk2wr34xc6hh42qeky.py
# Topologically Sorted Source Nodes: [embedding, x], Original ATen: [aten.embedding, aten.add]
# Source node to ATen node mapping:
# embedding => embedding
# x => add
# Graph fragment:
# %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %select_scatter_default), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %embedding), kwargs = {})
triton_poi_fused_add_embedding_1 = async_compile.triton('triton_poi_fused_add_embedding_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_embedding_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = (xindex // 16) % 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = x2
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = tmp1 == tmp2
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tl.full([1], 1, tl.int64)
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tl.load(in_ptr1 + (x0 + (4*tmp6)), xmask)
tmp8 = tmp0 + tmp7
tl.store(out_ptr0 + (x4), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [idx, setitem], Original ATen: [aten._to_copy, aten.lift_fresh, aten.fill]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_fill_lift_fresh_0.run(buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding, x], Original ATen: [aten.embedding, aten.add]
triton_poi_fused_add_embedding_1.run(primals_1, primals_2, buf1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class QueryEncoding(nn.Module):
def __init__(self, d_model):
super(QueryEncoding, self).__init__()
self.pe = nn.Embedding(2, d_model)
def forward(self, x):
B, N, L, _K = x.shape
idx = torch.ones((B, N, L), device=x.device).long()
idx[:, 0, :] = 0
x = x + self.pe(idx)
return x
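# A minimal sketch of the index this builds (an assumption, with B=1, N=3,
# L=2): only the first slice along dim 1 is marked 0, everything else 1, so
# pe acts as a learned "query vs. context" flag:
#   idx = [[[0, 0],
#           [1, 1],
#           [1, 1]]]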
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_fill_lift_fresh_0(out_ptr0, xnumel,
                                                XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 0, tl.int64)
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x2 = xindex // 16 % 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = x2
tmp2 = tl.full([1], 0, tl.int32)
tmp3 = tmp1 == tmp2
tmp4 = tl.full([1], 0, tl.int64)
tmp5 = tl.full([1], 1, tl.int64)
tmp6 = tl.where(tmp3, tmp4, tmp5)
tmp7 = tl.load(in_ptr1 + (x0 + 4 * tmp6), xmask)
tmp8 = tmp0 + tmp7
tl.store(out_ptr0 + x4, tmp8, xmask)
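# This kernel fuses pe(idx) + x: instead of reading the materialized idx
# buffer, it recomputes the 0/1 flag from the dim-1 coordinate (x2) and
# gathers the matching row of the (2, 4) embedding table in_ptr1 directly.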
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_fill_lift_fresh_0[grid(64)](buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_embedding_1[grid(256)](primals_1, primals_2,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf1, buf0
class QueryEncodingNew(nn.Module):
def __init__(self, d_model):
super(QueryEncodingNew, self).__init__()
self.pe = nn.Embedding(2, d_model)
def forward(self, input_0):
primals_2 = self.pe.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| wukevin/RoseTTAFold | QueryEncoding | false | 4,551 | [
"MIT"
] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, d_model):
super().__init__()
self.pe = nn.Embedding(2, d_model)
def forward(self, x):
B, N, L, _K = x.shape
idx = torch.ones((B, N, L), device=x.device).long()
idx[:, 0, :] = 0
x = x + self.pe(idx)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
PCN1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/o4/co4nsnmwmq6u72ocszlwnicby3irkdzg333bffvczctebolija3z.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 48
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2l/c2lopujvmnumdt346ycuertt5fmhzvjrvguon2iyn4d4fxs2achu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 512
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = (yindex // 16)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (16*x2) + (144*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wv/cwvgka2tdnkfhotjblshnd7peeqx5dbyqvmgelrsa445t7sdxarg.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3q/c3qvez2r77ch5iao37cjwlxkfudhougiy5mgqj2rvlssb3674oac.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (256*y1)), tmp0, xmask)
''', device_str='cuda')
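# triton_poi_fused_2/_3/_4 above are the analogous repacks for the convolution
# weights: each contiguous OIHW weight is rewritten so the input-channel
# dimension becomes innermost, matching the channels-last activations. A
# hand-written equivalent for the conv2 weight (plain PyTorch, not compiler
# output; the helper name is ours):
def _sketch_weight_to_channels_last():
    import torch
    w = torch.rand(32, 16, 3, 3)  # OIHW, contiguous strides (144, 9, 3, 1)
    w_cl = w.contiguous(memory_format=torch.channels_last)
    # Matches the strides of buf2 in call() below: (144, 1, 48, 16).
    assert w_cl.stride() == (144, 1, 48, 16)
    return w_cl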
# kernel path: runs/run_shard_7/inductor_cache/q4/cq4jzxm6mfzoz3s2gw4gzdejerpbfgskqaexzzt3f2ymblqxix3g.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
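# The triton_poi_fused_convolution_relu_* kernels are the fused epilogue of
# each convolution: the extern conv in call() runs with bias=None, and this
# kernel then adds the per-channel bias and applies ReLU in place on the conv
# output. Because the buffer is channels-last, the channel index is simply
# flat_index % C (here C=16). Equivalent eager math (a sketch, not the
# compiler's code path):
def _sketch_bias_relu_epilogue(conv_out, bias):
    # conv_out: (N, C, H, W) result of a bias-free convolution; bias: (C,)
    import torch
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))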
# kernel path: runs/run_shard_7/inductor_cache/ku/ckubedjc6xofli363opw2wv3s5l2og734gsgxuoguw3xp6n4zglw.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 28800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/oh/cohrpvvogjhpj5hqyf7yuwjs7gi6atbdwjl2s3qcqn4pkpe2znbw.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cl/ccl4udr4jxpg6mwurvh2p65ejlfsvbktwf7lbnwswommioowsrm2.py
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5q/c5q5kumendubiv2gwewlgrsy5mqbp2zgrqunx4htiexaday3bklo.py
# Topologically Sorted Source Nodes: [conv2d_4, cls_prob], Original ATen: [aten.convolution, aten._softmax]
# Source node to ATen node mapping:
# cls_prob => amax, exp, sub
# conv2d_4 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%convolution_4, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_4, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_convolution_9 = async_compile.triton('triton_poi_fused__softmax_convolution_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_convolution_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_convolution_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp6 = tmp3 + tmp5
tmp10 = tmp7 + tmp9
tmp11 = triton_helpers.maximum(tmp6, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
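# Softmax over the 2-channel heads is split into two kernels. This first pass
# (reused for both the cls_prob and rotate heads) fuses the bias add with the
# numerically stable part of softmax: subtract the per-pixel maximum of the
# two biased logits, then exponentiate. A sketch of the same math (eager
# PyTorch under our naming, not compiler output):
def _sketch_softmax_pass1(logits2, bias2):
    # logits2, bias2: tensors whose trailing channel dimension has size 2
    z = logits2 + bias2
    z = z - z.max(dim=-1, keepdim=True).values  # stability: exp cannot overflow
    return z.exp()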
# kernel path: runs/run_shard_7/inductor_cache/ag/cagkmfz6apmfdbpgeexfxmzfgz5cdokn35ktphlwqielpc5xh3rf.py
# Topologically Sorted Source Nodes: [cls_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# cls_prob => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_10 = async_compile.triton('triton_poi_fused__softmax_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = (yindex // 2)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (2*x2) + (72*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + ((2*x2) + (72*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (2*x2) + (72*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + (x2 + (36*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
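# Second softmax pass: divide each exponentiated logit by the per-pixel sum of
# the two channels, and at the same time write the result into a contiguous
# NCHW buffer (note the output strides (72, 36, 6, 1) in call() below, versus
# the channels-last input strides (72, 1, 12, 2)). Eager sketch (our naming):
def _sketch_softmax_pass2(exp_cl):
    # exp_cl: (N, 2, H, W) channels-last output of the first pass
    probs = exp_cl / exp_cl.sum(dim=1, keepdim=True)
    return probs.contiguous()  # back to standard NCHW strides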
# kernel path: runs/run_shard_7/inductor_cache/7n/c7nae5ymzhakbyjocdyhdjjitxv7vnelq2hhwlqzubk4q6u3iu7c.py
# Topologically Sorted Source Nodes: [bbox], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# bbox => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_11 = async_compile.triton('triton_poi_fused_convolution_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 64], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_11(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = (yindex // 3)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (3*x2) + (108*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (36*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
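# triton_poi_fused_convolution_11 is the bbox head's epilogue: it adds the
# 3-element bias and, like the softmax kernels, repacks the channels-last conv
# output (strides (108, 1, 18, 3)) into the contiguous NCHW result buffer
# (strides (108, 36, 6, 1)) that call() returns.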
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (128, 64, 2, 2), (256, 4, 2, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (2, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_11, (2, ), (1, ))
assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_13, (2, ), (1, ))
assert_size_stride(primals_14, (3, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_15, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 3, 3, 3), (27, 1, 9, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 48, 9, grid=grid(48, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((32, 16, 3, 3), (144, 1, 48, 16), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 512, 9, grid=grid(512, 9), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 2048, 9, grid=grid(2048, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((128, 64, 2, 2), (256, 1, 128, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 8192, 4, grid=grid(8192, 4), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 31, 31), (15376, 1, 496, 16))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf6, primals_2, 61504, grid=grid(61504), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 32, 15, 15), (7200, 1, 480, 32))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf8, primals_5, 28800, grid=grid(28800), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 7, 7), (3136, 1, 448, 64))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf10, primals_7, 12544, grid=grid(12544), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 128, 6, 6), (4608, 1, 768, 128))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf12, primals_9, 18432, grid=grid(18432), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 2, 6, 6), (72, 1, 12, 2))
buf14 = empty_strided_cuda((4, 2, 6, 6), (72, 1, 12, 2), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_4, cls_prob], Original ATen: [aten.convolution, aten._softmax]
triton_poi_fused__softmax_convolution_9.run(buf13, primals_11, buf14, 288, grid=grid(288), stream=stream0)
del primals_11
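        # buf13's storage is recycled here: reinterpret_tensor hands the same
        # allocation back with fresh NCHW strides, so the normalized softmax
        # can be written out without a new allocation.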
buf15 = reinterpret_tensor(buf13, (4, 2, 6, 6), (72, 36, 6, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [cls_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_10.run(buf14, buf15, 8, 36, grid=grid(8, 36), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 2, 6, 6), (72, 1, 12, 2))
buf17 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, rotate], Original ATen: [aten.convolution, aten._softmax]
triton_poi_fused__softmax_convolution_9.run(buf16, primals_13, buf17, 288, grid=grid(288), stream=stream0)
del primals_13
buf18 = reinterpret_tensor(buf16, (4, 2, 6, 6), (72, 36, 6, 1), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [rotate], Original ATen: [aten._softmax]
triton_poi_fused__softmax_10.run(buf17, buf18, 8, 36, grid=grid(8, 36), stream=stream0)
del buf17
# Topologically Sorted Source Nodes: [bbox], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf12, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 3, 6, 6), (108, 1, 18, 3))
buf20 = empty_strided_cuda((4, 3, 6, 6), (108, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [bbox], Original ATen: [aten.convolution]
triton_poi_fused_convolution_11.run(buf19, primals_15, buf20, 12, 36, grid=grid(12, 36), stream=stream0)
del buf19
del primals_15
return (buf15, buf18, buf20, buf0, buf1, buf2, buf3, buf4, primals_10, primals_12, primals_14, buf6, buf8, buf10, buf12, buf15, buf18, )
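    # Only the first three entries (buf15, buf18, buf20) are the model outputs
    # (cls_prob, rotate, bbox); the remaining tensors are the repacked weights
    # and intermediate activations that AOTAutograd keeps alive for the
    # backward pass of this '0_forward' graph.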
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((2, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((3, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class PCN1(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, dilation=1)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2, stride=1)
self.rotate = nn.Conv2d(128, 2, kernel_size=1, stride=1)
self.cls_prob = nn.Conv2d(128, 2, kernel_size=1, stride=1)
self.bbox = nn.Conv2d(128, 3, kernel_size=1, stride=1)
def forward(self, x):
x = F.relu(self.conv1(x), inplace=True)
x = F.relu(self.conv2(x), inplace=True)
x = F.relu(self.conv3(x), inplace=True)
x = F.relu(self.conv4(x), inplace=True)
cls_prob = F.softmax(self.cls_prob(x), dim=1)
rotate = F.softmax(self.rotate(x), dim=1)
bbox = self.bbox(x)
return cls_prob, rotate, bbox
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 48
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 16
y1 = yindex // 16
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 16 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 256 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 28800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused__softmax_convolution_9(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp6 = tmp3 + tmp5
tmp10 = tmp7 + tmp9
tmp11 = triton_helpers.maximum(tmp6, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused__softmax_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 8
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 2
y1 = yindex // 2
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 72 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (2 * x2 + 72 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 2 * x2 + 72 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + (x2 + 36 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_11(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 108 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 36 * y3), tmp2, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (128, 64, 2, 2), (256, 4, 2, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (2, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_11, (2,), (1,))
assert_size_stride(primals_12, (2, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_13, (2,), (1,))
assert_size_stride(primals_14, (3, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_15, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(48, 9)](primals_1, buf0, 48, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((32, 16, 3, 3), (144, 1, 48, 16), torch.
float32)
triton_poi_fused_2[grid(512, 9)](primals_4, buf2, 512, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_3[grid(2048, 9)](primals_6, buf3, 2048, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((128, 64, 2, 2), (256, 1, 128, 64), torch
.float32)
triton_poi_fused_4[grid(8192, 4)](primals_8, buf4, 8192, 4, XBLOCK=
4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_8
buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 31, 31), (15376, 1, 496, 16))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_5[grid(61504)](buf6, primals_2,
61504, XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 32, 15, 15), (7200, 1, 480, 32))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_6[grid(28800)](buf8, primals_5,
28800, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 7, 7), (3136, 1, 448, 64))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_7[grid(12544)](buf10, primals_7,
12544, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf11 = extern_kernels.convolution(buf10, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 128, 6, 6), (4608, 1, 768, 128))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_8[grid(18432)](buf12, primals_9,
18432, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf13 = extern_kernels.convolution(buf12, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 2, 6, 6), (72, 1, 12, 2))
buf14 = empty_strided_cuda((4, 2, 6, 6), (72, 1, 12, 2), torch.float32)
triton_poi_fused__softmax_convolution_9[grid(288)](buf13,
primals_11, buf14, 288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf15 = reinterpret_tensor(buf13, (4, 2, 6, 6), (72, 36, 6, 1), 0)
del buf13
triton_poi_fused__softmax_10[grid(8, 36)](buf14, buf15, 8, 36,
XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1)
buf16 = extern_kernels.convolution(buf12, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 2, 6, 6), (72, 1, 12, 2))
buf17 = buf14
del buf14
triton_poi_fused__softmax_convolution_9[grid(288)](buf16,
primals_13, buf17, 288, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf18 = reinterpret_tensor(buf16, (4, 2, 6, 6), (72, 36, 6, 1), 0)
del buf16
triton_poi_fused__softmax_10[grid(8, 36)](buf17, buf18, 8, 36,
XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1)
del buf17
buf19 = extern_kernels.convolution(buf12, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 3, 6, 6), (108, 1, 18, 3))
buf20 = empty_strided_cuda((4, 3, 6, 6), (108, 36, 6, 1), torch.float32
)
triton_poi_fused_convolution_11[grid(12, 36)](buf19, primals_15,
buf20, 12, 36, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf19
del primals_15
return (buf15, buf18, buf20, buf0, buf1, buf2, buf3, buf4, primals_10,
primals_12, primals_14, buf6, buf8, buf10, buf12, buf15, buf18)
class PCN1New(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, dilation=1)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2, stride=1)
self.rotate = nn.Conv2d(128, 2, kernel_size=1, stride=1)
self.cls_prob = nn.Conv2d(128, 2, kernel_size=1, stride=1)
self.bbox = nn.Conv2d(128, 3, kernel_size=1, stride=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
        # conv2d_4 in the compiled graph above is the cls_prob head (it
        # consumes primals_10/primals_11), and conv2d_5 is the rotate head,
        # so the parameters must map in that order.
        primals_10 = self.cls_prob.weight
        primals_11 = self.cls_prob.bias
        primals_12 = self.rotate.weight
        primals_13 = self.rotate.bias
primals_14 = self.bbox.weight
primals_15 = self.bbox.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0], output[1], output[2]
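# A hedged sanity check (our sketch, not part of the generated module): with
# shared weights, the compiled wrapper should reproduce the eager forward.
# This assumes PCN1 (defined earlier in this entry) is importable alongside
# PCN1New and that a CUDA device is available.
def _sketch_check_equivalence():
    import torch
    eager, compiled = PCN1().cuda(), PCN1New().cuda()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 3, 64, 64, device='cuda')
    for a, b in zip(eager(x), compiled(x)):
        torch.testing.assert_close(a, b, rtol=1e-4, atol=1e-4)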
 | wkdhkr/pytorch-PCN | PCN1 | false | 4552 | [
"BSD-2-Clause"
] | 0 | 4686c8fcda0b4fe7ecd7488f5554e19e8f6a8f68 | https://github.com/wkdhkr/pytorch-PCN/tree/4686c8fcda0b4fe7ecd7488f5554e19e8f6a8f68 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, dilation=1)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=2)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2, stride=1)
self.rotate = nn.Conv2d(128, 2, kernel_size=1, stride=1)
self.cls_prob = nn.Conv2d(128, 2, kernel_size=1, stride=1)
self.bbox = nn.Conv2d(128, 3, kernel_size=1, stride=1)
def forward(self, x):
x = F.relu(self.conv1(x), inplace=True)
x = F.relu(self.conv2(x), inplace=True)
x = F.relu(self.conv3(x), inplace=True)
x = F.relu(self.conv4(x), inplace=True)
cls_prob = F.softmax(self.cls_prob(x), dim=1)
rotate = F.softmax(self.rotate(x), dim=1)
bbox = self.bbox(x)
return cls_prob, rotate, bbox
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
LinearNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/el/cel3ti6ei3rprs2l5m6qs62p6md67qhlcbr3oxhxsqfmherljfbo.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_5, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_5,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
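# For this MLP, each Linear+ReLU pair is split the same way the convolutions
# were: an extern matmul against a transposed view of the weight (no bias),
# then a fused kernel like the one above that adds the bias and applies ReLU
# in place. Eager equivalent (a sketch under those assumptions, not the
# compiler's code path; the helper name is ours):
def _sketch_linear_relu(x, weight, bias):
    import torch
    y = x @ weight.t()           # extern_kernels.mm(x, weight^T)
    return torch.relu(y + bias)  # triton_poi_fused_relu_* epilogue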
# kernel path: runs/run_shard_7/inductor_cache/dq/cdqniiyy2bh7exxjlmeiulcbmupg7rxyslwm7crl3ajwwld6usn2.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_1
# Graph fragment:
# %add_tensor_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_4, %primals_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_4,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6t/c6tvczfz3yzaenwtxcn2nlcjymp7bnekhmmimgqv2po5hnx62wl4.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_2
# Graph fragment:
# %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (256, 4), (4, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (512, 256), (256, 1))
assert_size_stride(primals_5, (512, ), (1, ))
assert_size_stride(primals_6, (1024, 512), (512, 1))
assert_size_stride(primals_7, (1024, ), (1, ))
assert_size_stride(primals_8, (1024, 1024), (1024, 1))
assert_size_stride(primals_9, (1024, ), (1, ))
assert_size_stride(primals_10, (512, 1024), (1024, 1))
assert_size_stride(primals_11, (512, ), (1, ))
assert_size_stride(primals_12, (256, 512), (512, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (4, 256), (256, 1))
assert_size_stride(primals_15, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 256), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 1024, grid=grid(1024), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 512), (1, 256), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 2048, grid=grid(2048), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (512, 1024), (1, 512), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf5, primals_7, 4096, grid=grid(4096), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (1024, 1024), (1, 1024), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf7, primals_9, 4096, grid=grid(4096), stream=stream0)
del primals_9
buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf7, reinterpret_tensor(primals_10, (1024, 512), (1, 1024), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf9, primals_11, 2048, grid=grid(2048), stream=stream0)
del primals_11
buf10 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf9, reinterpret_tensor(primals_12, (512, 256), (1, 512), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf11, primals_13, 1024, grid=grid(1024), stream=stream0)
del primals_13
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, buf11, reinterpret_tensor(primals_14, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf12)
del primals_15
return (buf12, primals_1, buf1, buf3, buf5, buf7, buf9, buf11, primals_14, primals_12, primals_10, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1024, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1024, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((512, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LinearNet(nn.Module):
def __init__(self, n_feature, n_output):
super(LinearNet, self).__init__()
self.fc1 = nn.Linear(n_feature, 256)
self.fc2 = nn.Linear(256, 512)
self.fc3 = nn.Linear(512, 1024)
self.fc4 = nn.Linear(1024, 1024)
self.fc5 = nn.Linear(1024, 512)
self.fc6 = nn.Linear(512, 256)
self.fc7 = nn.Linear(256, n_output)
self.dropout = nn.Dropout(0.2)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = F.relu(self.fc2(x))
x = self.dropout(x)
x = F.relu(self.fc3(x))
x = self.dropout(x)
x = F.relu(self.fc4(x))
x = self.dropout(x)
x = F.relu(self.fc5(x))
x = self.dropout(x)
x = F.relu(self.fc6(x))
x = self.dropout(x)
x = self.fc7(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_feature': 4, 'n_output': 4}]
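# Minimal usage sketch (our example, not part of the original file; shapes
# follow get_inputs/get_init_inputs above):
def _example_linearnet():
    model = LinearNet(n_feature=4, n_output=4).eval()  # eval() disables dropout
    x = torch.rand(4, 4)
    return model(x)  # -> tensor of shape (4, 4)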
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
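# Note: only the kernel above bounds-checks with xmask; the two kernels below
# were emitted with an all-True mask (discarded here), so they load and store
# unmasked and keep the unused xnumel argument only for a uniform signature.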
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (256, 4), (4, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (512, 256), (256, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (1024, 512), (512, 1))
assert_size_stride(primals_7, (1024,), (1,))
assert_size_stride(primals_8, (1024, 1024), (1024, 1))
assert_size_stride(primals_9, (1024,), (1,))
assert_size_stride(primals_10, (512, 1024), (1024, 1))
assert_size_stride(primals_11, (512,), (1,))
assert_size_stride(primals_12, (256, 512), (512, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (4, 256), (256, 1))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 256),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 512), (
1, 256), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(2048)](buf3, primals_5, 2048, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (512, 1024),
(1, 512), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_relu_2[grid(4096)](buf5, primals_7, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (1024, 1024),
(1, 1024), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_relu_2[grid(4096)](buf7, primals_9, 4096, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(buf7, reinterpret_tensor(primals_10, (1024, 512),
(1, 1024), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_1[grid(2048)](buf9, primals_11, 2048, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_11
buf10 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_12, (512, 256),
(1, 512), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_0[grid(1024)](buf11, primals_13, 1024, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_13
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_15, buf11, reinterpret_tensor(
primals_14, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf12)
del primals_15
return (buf12, primals_1, buf1, buf3, buf5, buf7, buf9, buf11,
primals_14, primals_12, primals_10, primals_8, primals_6, primals_4)
class LinearNetNew(nn.Module):
def __init__(self, n_feature, n_output):
super(LinearNetNew, self).__init__()
self.fc1 = nn.Linear(n_feature, 256)
self.fc2 = nn.Linear(256, 512)
self.fc3 = nn.Linear(512, 1024)
self.fc4 = nn.Linear(1024, 1024)
self.fc5 = nn.Linear(1024, 512)
self.fc6 = nn.Linear(512, 256)
self.fc7 = nn.Linear(256, n_output)
self.dropout = nn.Dropout(0.2)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_8 = self.fc4.weight
primals_9 = self.fc4.bias
primals_10 = self.fc5.weight
primals_11 = self.fc5.bias
primals_12 = self.fc6.weight
primals_13 = self.fc6.bias
primals_14 = self.fc7.weight
primals_15 = self.fc7.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
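# Note (ours): the compiled graph in call() contains no dropout ops, so
# LinearNetNew matches LinearNet only when the latter is in eval mode, where
# nn.Dropout is the identity.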
| wslerry/regresstorch | LinearNet | false | 4,553 | [
"MIT"
] | 0 | b2e3507d8ed794e5d1d75ebfe910f74bbcb9a06b | https://github.com/wslerry/regresstorch/tree/b2e3507d8ed794e5d1d75ebfe910f74bbcb9a06b | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, n_feature, n_output):
super().__init__()
self.fc1 = nn.Linear(n_feature, 256)
self.fc2 = nn.Linear(256, 512)
self.fc3 = nn.Linear(512, 1024)
self.fc4 = nn.Linear(1024, 1024)
self.fc5 = nn.Linear(1024, 512)
self.fc6 = nn.Linear(512, 256)
self.fc7 = nn.Linear(256, n_output)
self.dropout = nn.Dropout(0.2)
def forward(self, x):
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = F.relu(self.fc2(x))
x = self.dropout(x)
x = F.relu(self.fc3(x))
x = self.dropout(x)
x = F.relu(self.fc4(x))
x = self.dropout(x)
x = F.relu(self.fc5(x))
x = self.dropout(x)
x = F.relu(self.fc6(x))
x = self.dropout(x)
x = self.fc7(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
ResidualDenseBlock_3C | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/5i/c5icgszsmzzz6scnewpijmeu66hhcnz77ph7igiddh765aw6r7tr.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1572864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 96
x0 = xindex % 4096
x2 = (xindex // 393216)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (262144*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 96, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-64) + x1)) + (131072*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-64) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp6, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tl.store(out_ptr0 + (x3), tmp19, None)
''', device_str='cuda')
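# In eager terms, triton_poi_fused_cat_0 materializes (a sketch, not part of
# the generated module):
#   torch.cat((x, F.leaky_relu(conv1_out + bias, negative_slope=0.2)), dim=1)
# writing the 64 input channels and the 32 activated conv channels straight
# into one (4, 96, 64, 64) buffer instead of allocating the LeakyReLU result.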
# kernel path: runs/run_shard_7/inductor_cache/3w/c3wk4rwnrsnr4gmyth3hio7xwmgrpmqs4uenaletytzd3iyq6vvp.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where, %where_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 128
x0 = xindex % 4096
x2 = (xindex // 524288)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (262144*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 96, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (4096*((-64) + x1)) + (131072*x2)), tmp9, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-64) + x1), tmp9, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 128, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = tl.load(in_ptr3 + (x0 + (4096*((-96) + x1)) + (131072*x2)), tmp20, other=0.0)
tmp24 = tl.load(in_ptr4 + ((-96) + x1), tmp20, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 > tmp13
tmp27 = tmp25 * tmp15
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp20, tmp28, tmp29)
tmp31 = tl.where(tmp9, tmp19, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + (x3), tmp32, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/y3/cy3i4midcri6kdml76nlj4y6tlbilzuojharw267znurvclibfhz.py
# Topologically Sorted Source Nodes: [x3, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul_2
# x3 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_3), kwargs = {})
triton_poi_fused_add_convolution_mul_2 = async_compile.triton('triton_poi_fused_add_convolution_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x3), None)
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/d7/cd7sekzjlonku7u4zcsaaag5675rcay5vlqrdbrjflu2hcdvhmix.py
# Topologically Sorted Source Nodes: [conv2d_1, x2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x2 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_1, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, None)
''', device_str='cuda')
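# Note: this last kernel only recomputes the LeakyReLU sign test and stores a
# boolean mask (activation > 0); inductor saves these masks (buf6, buf7 in
# call() below) for the leaky_relu backward instead of the activations.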
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_4, (32, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 96, 64, 64), (393216, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_3, buf0, primals_2, buf1, 1572864, grid=grid(1572864), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_3, buf0, primals_2, buf2, primals_5, buf3, 2097152, grid=grid(2097152), stream=stream0)
# Topologically Sorted Source Nodes: [x3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x3, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add]
triton_poi_fused_add_convolution_mul_2.run(buf5, primals_7, primals_3, 1048576, grid=grid(1048576), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3.run(buf2, primals_5, buf6, 524288, grid=grid(524288), stream=stream0)
del buf2
del primals_5
buf7 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3.run(buf0, primals_2, buf7, 524288, grid=grid(524288), stream=stream0)
del buf0
del primals_2
return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf6, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 96, 3, 3), (864, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.init as init
def initialize_weights(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, torch.nn.Conv2d):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.Linear):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
class ResidualDenseBlock_3C(nn.Module):
def __init__(self, nf=64, gc=32, bias=True):
super(ResidualDenseBlock_3C, self).__init__()
self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)
self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)
self.conv3 = nn.Conv2d(nf + 2 * gc, nf, 3, 1, 1, bias=bias)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
initialize_weights([self.conv1, self.conv2, self.conv3], 0.1)
def forward(self, x):
x1 = self.lrelu(self.conv1(x))
x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
x3 = self.conv3(torch.cat((x, x1, x2), 1))
return x3 * 0.2 + x
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
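# Minimal usage sketch (our example, not part of the original file):
def _example_rdb3c():
    block = ResidualDenseBlock_3C(nf=64, gc=32)
    x = torch.rand(1, 64, 16, 16)
    return block(x)  # same shape as x: dense features scaled by 0.2, plus x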
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 96
x0 = xindex % 4096
x2 = xindex // 393216
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 96, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 131072 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-64 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp6, tmp16, tmp17)
tmp19 = tl.where(tmp4, tmp5, tmp18)
tl.store(out_ptr0 + x3, tmp19, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 128
x0 = xindex % 4096
x2 = xindex // 524288
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 96, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 131072 * x2), tmp9,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-64 + x1), tmp9, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.0
tmp14 = tmp12 > tmp13
tmp15 = 0.2
tmp16 = tmp12 * tmp15
tmp17 = tl.where(tmp14, tmp12, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tl.full([1], 128, tl.int64)
tmp23 = tl.load(in_ptr3 + (x0 + 4096 * (-96 + x1) + 131072 * x2), tmp20,
other=0.0)
tmp24 = tl.load(in_ptr4 + (-96 + x1), tmp20, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 > tmp13
tmp27 = tmp25 * tmp15
tmp28 = tl.where(tmp26, tmp25, tmp27)
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp20, tmp28, tmp29)
tmp31 = tl.where(tmp9, tmp19, tmp30)
tmp32 = tl.where(tmp4, tmp5, tmp31)
tl.store(out_ptr0 + x3, tmp32, None)
@triton.jit
def triton_poi_fused_add_convolution_mul_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x3, None)
tmp2 = tmp0 + tmp1
tmp3 = 0.2
tmp4 = tmp2 * tmp3
tmp6 = tmp4 + tmp5
tl.store(in_out_ptr0 + x3, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_4, (32, 96, 3, 3), (864, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 96, 64, 64), (393216, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(1572864)](primals_3, buf0, primals_2,
buf1, 1572864, XBLOCK=1024, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_1[grid(2097152)](primals_3, buf0, primals_2,
buf2, primals_5, buf3, 2097152, XBLOCK=1024, num_warps=4,
num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_mul_2[grid(1048576)](buf5,
primals_7, primals_3, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3[grid(
524288)](buf2, primals_5, buf6, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf2
del primals_5
buf7 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3[grid(
524288)](buf0, primals_2, buf7, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf0
del primals_2
return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3,
buf6, buf7)
def initialize_weights(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, torch.nn.Conv2d):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.Linear):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
class ResidualDenseBlock_3CNew(nn.Module):
def __init__(self, nf=64, gc=32, bias=True):
super(ResidualDenseBlock_3CNew, self).__init__()
self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)
self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)
self.conv3 = nn.Conv2d(nf + 2 * gc, nf, 3, 1, 1, bias=bias)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
initialize_weights([self.conv1, self.conv2, self.conv3], 0.1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| wsdea/EfficientSR | ResidualDenseBlock_3C | false | 4,554 | [
"MIT"
] | 0 | 077dea18c90e0d5bed722c609a776033c09f80e6 | https://github.com/wsdea/EfficientSR/tree/077dea18c90e0d5bed722c609a776033c09f80e6 | import torch
import torch.nn as nn
import torch.nn.init as init
def initialize_weights(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, torch.nn.Conv2d):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.Linear):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
class Model(nn.Module):
def __init__(self, nf=64, gc=32, bias=True):
super().__init__()
self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)
self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)
self.conv3 = nn.Conv2d(nf + 2 * gc, nf, 3, 1, 1, bias=bias)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
initialize_weights([self.conv1, self.conv2, self.conv3], 0.1)
def forward(self, x):
x1 = self.lrelu(self.conv1(x))
x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
x3 = self.conv3(torch.cat((x, x1, x2), 1))
return x3 * 0.2 + x
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return []
|
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cf/ccfb6ec7gjxmag3ujecjz3vcmyoemkahdlpm7h6hhek4dpm3jhpe.py
# Topologically Sorted Source Nodes: [mean, var, add, std, sub, x, x_1, x_2], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# std => sqrt
# sub => sub
# var => var
# x => mul
# x_1 => div
# x_2 => add_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %sqrt), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tmp23 / tmp9
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp12 / tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
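# The single fused kernel above evaluates, per element of the last (size-4)
# axis:
#   y = a_2 * (x - mean(x)) / sqrt(var(x, unbiased=False) + 1e-05) + b_2
# with the mean and biased variance unrolled over the four neighbours
# (tmp2..tmp10 for the mean, tmp13..tmp24 for the variance).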
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, var, add, std, sub, x, x_1, x_2], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
        std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
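# Sketch (ours): with the default parameters (a_2 = ones, b_2 = zeros) this
# should agree with torch.nn.LayerNorm, which also normalizes with the biased
# variance; the helper below is an illustration, not part of the original file.
def _check_against_nn_layernorm(d_model=4, eps=1e-05):
    ln = LayerNorm(d_model, eps)
    ref = nn.LayerNorm(d_model, eps=eps)
    x = torch.rand(2, 3, d_model)
    assert torch.allclose(ln(x), ref(x), atol=1e-06)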
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tmp23 / tmp9
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp12 / tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormNew(nn.Module):
def __init__(self, d_model, eps=1e-05):
super(LayerNormNew, self).__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, input_0):
primals_2 = self.a_2
primals_3 = self.b_2
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| wukevin/RoseTTAFold | LayerNorm | false | 4,555 | [
"MIT"
] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, d_model, eps=1e-05):
super().__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
        std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cg/ccgeyggrwjatnn72xyolgiel5jx5opfkk6aojxc7vj2fl7mzhxkt.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 8
x3 = (xindex // 128)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (32*x1) + (128*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
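# Note: triton_poi_fused_clone_0 adds the projection bias and copies one
# operand into a contiguous (batch, head, seq, d_k) layout (here 4 x 8 x 4 x 4)
# so the following batched matmul can treat each head as one small matrix.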
# kernel path: runs/run_shard_7/inductor_cache/ut/cutioxpozohxld7suyz35rbmr3jityexhfxm3jifg5gap564gweg.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 32
y1 = (yindex // 32)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (32*x2) + (128*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ic/cicrq5pxpvj2ulyewbc5dtvpbbh52nuwnwntw63r3254uun2czvo.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/d2/cd2tzxoj6qyacqcakrmgzt7bvwbnjpz3zietppzw2tbanup3un7q.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
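# Note: the two kernels above implement a numerically stable softmax split into
# two passes: triton_poi_fused__softmax_2 computes exp((x - rowmax(x)) / 2.0),
# with the attention temperature (k_channels ** 0.5 = 2.0 for these shapes)
# folded into the exponent, and triton_poi_fused__softmax_3 divides by the row
# sum. Together they match torch.softmax(scores / 2.0, dim=-1); subtracting the
# row max first leaves the result unchanged but avoids overflow in exp.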
# kernel path: runs/run_shard_7/inductor_cache/6f/c6fejvwx3uipcn6sk47lzukzhjoniygbfkexqu53il2mhn7vy2ot.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 8
x2 = (xindex // 32) % 4
x3 = (xindex // 128)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (128*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (32, 4), (4, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (32, 4), (4, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (32, 4), (4, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (4, 32), (32, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 32), (1, 4), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 32), (1, 4), 0), out=buf1)
del primals_6
buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 32), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_5, buf3, 512, grid=grid(512), stream=stream0)
del primals_5
buf4 = reinterpret_tensor(buf0, (4, 8, 4, 4), (128, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, primals_7, buf4, 128, 4, grid=grid(128, 4), stream=stream0)
del primals_7
buf5 = reinterpret_tensor(buf1, (32, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (32, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (32, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 512, grid=grid(512), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 8, 4, 4), (128, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 512, grid=grid(512), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_9, buf8, 512, grid=grid(512), stream=stream0)
del primals_9
buf9 = reinterpret_tensor(buf2, (32, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (32, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (32, 4, 4), (16, 4, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 512, grid=grid(512), stream=stream0)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 32), (32, 1), 0), reinterpret_tensor(primals_10, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf11)
del primals_11
return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 32), (32, 1), 0), primals_10, reinterpret_tensor(buf8, (32, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (32, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (32, 4, 4), (16, 1, 4), 0), )
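# Note: besides the attention output (buf11 viewed as (4, 4, 4)) and the
# attention weights (buf7), call() returns the reshaped inputs and intermediate
# buffers; since this is the '0_forward' graph, these appear to be the tensors
# saved for the backward pass.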
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
def __init__(self, temperature, dropout=0.1):
super(ScaledDotProductAttention, self).__init__()
self.temperature = temperature
self.dropout = nn.Dropout(p=dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q, k.transpose(2, 3)) / self.temperature
if mask is not None:
attn = attn.masked_fill(mask=mask, value=float('-inf'))
attn = torch.softmax(attn, dim=-1)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
return out, attn
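# Note: positions where mask is True are filled with -inf before the softmax,
# so they receive exactly zero attention weight after normalization.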
class MultiHeadAttention(nn.Module):
def __init__(self, in_channels, k_channels, v_channels, n_head=8,
dropout=0.1):
super(MultiHeadAttention, self).__init__()
self.in_channels = in_channels
self.k_channels = k_channels
self.v_channels = v_channels
self.n_head = n_head
self.q_linear = nn.Linear(in_channels, n_head * k_channels)
self.k_linear = nn.Linear(in_channels, n_head * k_channels)
self.v_linear = nn.Linear(in_channels, n_head * v_channels)
self.attention = ScaledDotProductAttention(temperature=k_channels **
0.5, dropout=dropout)
self.out_linear = nn.Linear(n_head * v_channels, in_channels)
self.dropout = nn.Dropout(p=dropout)
def forward(self, q, k, v, mask=None):
b, q_len, k_len, v_len = q.size(0), q.size(1), k.size(1), v.size(1)
q = self.q_linear(q).view(b, q_len, self.n_head, self.k_channels
).transpose(1, 2)
k = self.k_linear(k).view(b, k_len, self.n_head, self.k_channels
).transpose(1, 2)
v = self.v_linear(v).view(b, v_len, self.n_head, self.v_channels
).transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
out, attn = self.attention(q, k, v, mask=mask)
out = out.transpose(1, 2).contiguous().view(b, q_len, self.n_head *
self.v_channels)
out = self.out_linear(out)
out = self.dropout(out)
return out, attn
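# Shape walkthrough for the sizes used below (illustrative): with b=4, q_len=4,
# n_head=8 and k_channels=4, q_linear maps (4, 4, 4) -> (4, 4, 32), the view
# gives (4, 4, 8, 4), and the transpose yields (4, 8, 4, 4) so that scaled
# dot-product attention runs independently per head.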
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'in_channels': 4, 'k_channels': 4, 'v_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 8
x3 = xindex // 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 32 * x1 + 128 * x3), xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x4, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 32
y1 = yindex // 32
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 32 * x2 + 128 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 8
x2 = xindex // 32 % 4
x3 = xindex // 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 128 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
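# Note: triton_poi_fused_clone_4 copies the attention output from its
# (batch, head, query, channel) layout into a contiguous
# (batch, query, head, channel) buffer, so the final addmm can treat it as a
# plain (16, 32) matrix.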
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (32, 4), (4, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 4), (4, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 4), (4, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (4, 32), (32, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 32), (1, 4), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 32), (1, 4), 0), out=buf1)
del primals_6
buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 32), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(512)](buf0, primals_5, buf3, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = reinterpret_tensor(buf0, (4, 8, 4, 4), (128, 16, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(128, 4)](buf1, primals_7, buf4, 128,
4, XBLOCK=4, YBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf5 = reinterpret_tensor(buf1, (32, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (32, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (32, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(512)](buf5, buf6, 512, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 8, 4, 4), (128, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_3[grid(512)](buf6, buf7, 512, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_clone_0[grid(512)](buf2, primals_9, buf8, 512,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf9 = reinterpret_tensor(buf2, (32, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (32, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (32, 4, 4), (16, 4, 1), 0), out=buf9)
        buf10 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(512)](buf9, buf10, 512, XBLOCK=128,
num_warps=4, num_stages=1)
del buf9
buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 32),
(32, 1), 0), reinterpret_tensor(primals_10, (32, 4), (1, 32), 0
), alpha=1, beta=1, out=buf11)
del primals_11
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), buf7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 32), (32, 1), 0
), primals_10, reinterpret_tensor(buf8, (32, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (32, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (32, 4, 4), (16, 1, 4), 0)
class ScaledDotProductAttention(nn.Module):
def __init__(self, temperature, dropout=0.1):
super(ScaledDotProductAttention, self).__init__()
self.temperature = temperature
self.dropout = nn.Dropout(p=dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q, k.transpose(2, 3)) / self.temperature
if mask is not None:
attn = attn.masked_fill(mask=mask, value=float('-inf'))
attn = torch.softmax(attn, dim=-1)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
return out, attn
class MultiHeadAttentionNew(nn.Module):
def __init__(self, in_channels, k_channels, v_channels, n_head=8,
dropout=0.1):
super(MultiHeadAttentionNew, self).__init__()
self.in_channels = in_channels
self.k_channels = k_channels
self.v_channels = v_channels
self.n_head = n_head
self.q_linear = nn.Linear(in_channels, n_head * k_channels)
self.k_linear = nn.Linear(in_channels, n_head * k_channels)
self.v_linear = nn.Linear(in_channels, n_head * v_channels)
self.attention = ScaledDotProductAttention(temperature=k_channels **
0.5, dropout=dropout)
self.out_linear = nn.Linear(n_head * v_channels, in_channels)
self.dropout = nn.Dropout(p=dropout)
def forward(self, input_0, input_1, input_2):
primals_4 = self.q_linear.weight
primals_5 = self.q_linear.bias
primals_6 = self.k_linear.weight
primals_7 = self.k_linear.bias
primals_8 = self.v_linear.weight
primals_9 = self.v_linear.bias
primals_10 = self.out_linear.weight
primals_11 = self.out_linear.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
 | wu0004in/vedastr | MultiHeadAttention | false | 4557 | ["Apache-2.0"] | 0 | 83511a408b68c264561a30daff5154cd0148bebd | https://github.com/wu0004in/vedastr/tree/83511a408b68c264561a30daff5154cd0148bebd | import torch
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
def __init__(self, temperature, dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(p=dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q, k.transpose(2, 3)) / self.temperature
if mask is not None:
attn = attn.masked_fill(mask=mask, value=float('-inf'))
attn = torch.softmax(attn, dim=-1)
attn = self.dropout(attn)
out = torch.matmul(attn, v)
return out, attn
class Model(nn.Module):
def __init__(self, in_channels, k_channels, v_channels, n_head=8,
dropout=0.1):
super().__init__()
self.in_channels = in_channels
self.k_channels = k_channels
self.v_channels = v_channels
self.n_head = n_head
self.q_linear = nn.Linear(in_channels, n_head * k_channels)
self.k_linear = nn.Linear(in_channels, n_head * k_channels)
self.v_linear = nn.Linear(in_channels, n_head * v_channels)
self.attention = ScaledDotProductAttention(temperature=k_channels **
0.5, dropout=dropout)
self.out_linear = nn.Linear(n_head * v_channels, in_channels)
self.dropout = nn.Dropout(p=dropout)
def forward(self, q, k, v, mask=None):
b, q_len, k_len, v_len = q.size(0), q.size(1), k.size(1), v.size(1)
q = self.q_linear(q).view(b, q_len, self.n_head, self.k_channels
).transpose(1, 2)
k = self.k_linear(k).view(b, k_len, self.n_head, self.k_channels
).transpose(1, 2)
v = self.v_linear(v).view(b, v_len, self.n_head, self.v_channels
).transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1)
out, attn = self.attention(q, k, v, mask=mask)
out = out.transpose(1, 2).contiguous().view(b, q_len, self.n_head *
self.v_channels)
out = self.out_linear(out)
out = self.dropout(out)
return out, attn
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [4, 4, 4]
|
FFN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zi/czi6taqk3yywywfl3iwbejutxysbxi6hrg6s2rrrevzoemnmagnw.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_3, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
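# Note: this kernel fuses the bias add with an in-place ReLU and additionally
# stores the boolean (activation <= 0) mask, which is what
# aten.threshold_backward needs in order to zero the incoming gradients during
# the backward pass.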
# kernel path: runs/run_shard_7/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
# Source node to ATen node mapping:
# linear_1 => view_5
# Graph fragment:
# %view_5 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_3, [64, 4]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_rate=0.0, use_relu=True):
super(FC, self).__init__()
self.dropout_r = dropout_rate
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_rate > 0:
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLP(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_rate=0.0,
use_relu=True):
super(MLP, self).__init__()
self.fc = FC(in_size, mid_size, dropout_rate=dropout_rate, use_relu
=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, x):
return self.linear(self.fc(x))
class FFN(nn.Module):
def __init__(self, opt):
super(FFN, self).__init__()
self.mlp = MLP(in_size=opt.hidden_size, mid_size=opt.ff_size,
out_size=opt.hidden_size, dropout_rate=opt.dropout_rate,
use_relu=True)
def forward(self, x):
return self.mlp(x)
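# Minimal usage sketch with the mock options used below (illustrative only):
#   opt = _mock_config(hidden_size=4, ff_size=4, dropout_rate=0.5)
#   y = FFN(opt)(torch.rand(4, 4, 4, 4))
# The output keeps the input shape: Linear -> ReLU -> Dropout -> Linear.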
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(hidden_size=4, ff_size=4, dropout_rate
=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, primals_4, buf4
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_rate=0.0, use_relu=True):
super(FC, self).__init__()
self.dropout_r = dropout_rate
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_rate > 0:
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLP(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_rate=0.0,
use_relu=True):
super(MLP, self).__init__()
self.fc = FC(in_size, mid_size, dropout_rate=dropout_rate, use_relu
=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, x):
return self.linear(self.fc(x))
class FFNNew(nn.Module):
def __init__(self, opt):
super(FFNNew, self).__init__()
self.mlp = MLP(in_size=opt.hidden_size, mid_size=opt.ff_size,
out_size=opt.hidden_size, dropout_rate=opt.dropout_rate,
use_relu=True)
def forward(self, input_0):
primals_1 = self.mlp.fc.linear.weight
primals_2 = self.mlp.fc.linear.bias
primals_4 = self.mlp.linear.weight
primals_5 = self.mlp.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
 | Originofamonia/mcan-vqa | FFN | false | 4558 | ["Apache-2.0"] | 0 | e7e9fdc654d72dbbcbc03e43ae8a59c16b6d10d1 | https://github.com/Originofamonia/mcan-vqa/tree/e7e9fdc654d72dbbcbc03e43ae8a59c16b6d10d1 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_rate=0.0, use_relu=True):
super().__init__()
self.dropout_r = dropout_rate
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_rate > 0:
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLP(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_rate=0.0,
use_relu=True):
super().__init__()
self.fc = FC(in_size, mid_size, dropout_rate=dropout_rate, use_relu
=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, x):
return self.linear(self.fc(x))
class Model(nn.Module):
def __init__(self, opt):
super().__init__()
self.mlp = MLP(in_size=opt.hidden_size, mid_size=opt.ff_size,
out_size=opt.hidden_size, dropout_rate=opt.dropout_rate,
use_relu=True)
def forward(self, x):
return self.mlp(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(hidden_size=4, ff_size=4, dropout_rate
=0.5)}]
|
CoevolExtractor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/le/clemd3fjnkby5w36zezyjaq4ygcdl6zeuha7yam2oijv4yjilswb.py
# Topologically Sorted Source Nodes: [mean, var, add, std, sub, x, x_1, x_2], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# std => sqrt
# sub => sub
# var => var
# x => mul
# x_1 => div
# x_2 => add_1
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%view_4, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view_4, [-1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sub), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %sqrt), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_4), kwargs = {})
triton_per_fused_add_div_mean_mul_sqrt_sub_var_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_sqrt_sub_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_sqrt_sub_var_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = (xindex // 4)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (16*(r2 // 4)) + (64*x1) + (r2 % 4)), xmask, other=0.0)
tmp25 = tl.load(in_ptr1 + (r2), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (r2), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = tmp18 / tmp19
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp26 = tmp0 - tmp20
tmp27 = tmp25 * tmp26
tmp28 = tmp27 / tmp24
tmp30 = tmp28 + tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp24, xmask)
tl.store(out_ptr0 + (r2 + (16*x3)), tmp30, xmask)
''', device_str='cuda')
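# Note: this persistent-reduction kernel handles one 16-element feature row per
# program (rnumel == RBLOCK == 16), computing the row mean and biased variance
# in a single pass and fusing the a_2 * (x - mean) / sqrt(var + eps) + b_2
# normalization; the mean and std are written back through the in_out pointers,
# presumably for reuse in the backward pass.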
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (16, ), (1, ))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [pair], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1), 0), out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf4 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, var, add, std, sub, x, x_1, x_2], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_sqrt_sub_var_0.run(buf2, buf6, buf0, primals_3, primals_4, buf7, 64, 16, grid=grid(64), stream=stream0)
del primals_3
del primals_4
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pair_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf7, (64, 16), (16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf8)
del primals_6
return (reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf0, buf2, buf6, reinterpret_tensor(buf7, (64, 16), (16, 1), 0), primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps
)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
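# Note: this hand-rolled LayerNorm is equivalent to nn.LayerNorm over the last
# dimension (biased variance, eps added inside the sqrt); spelling the
# normalization out as explicit ops is what lets Inductor fuse it into the
# single triton_per_fused_add_div_mean_mul_sqrt_sub_var_0 kernel above.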
class CoevolExtractor(nn.Module):
def __init__(self, n_feat_proj, n_feat_out, p_drop=0.1):
super(CoevolExtractor, self).__init__()
self.norm_2d = LayerNorm(n_feat_proj * n_feat_proj)
self.proj_2 = nn.Linear(n_feat_proj ** 2, n_feat_out)
def forward(self, x_down, x_down_w):
B, _N, L = x_down.shape[:3]
pair = torch.einsum('abij,ablm->ailjm', x_down, x_down_w)
pair = pair.reshape(B, L, L, -1)
pair = self.norm_2d(pair)
pair = self.proj_2(pair)
return pair
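# Note: the einsum 'abij,ablm->ailjm' contracts over axis b (the N dimension),
# producing a (B, L, L, n_feat_proj, n_feat_proj) outer-product feature for
# every residue pair; the reshape flattens it to (B, L, L, n_feat_proj ** 2)
# before the layer norm and the final projection.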
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_feat_proj': 4, 'n_feat_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_0(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * (r2 // 4) + 64 * x1 + r2 % 4),
xmask, other=0.0)
tmp25 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = tmp18 / tmp19
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp26 = tmp0 - tmp20
tmp27 = tmp25 * tmp26
tmp28 = tmp27 / tmp24
tmp30 = tmp28 + tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp24, xmask)
tl.store(out_ptr0 + (r2 + 16 * x3), tmp30, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (4, 16), (16, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (64, 1,
16), 0), reinterpret_tensor(primals_2, (4, 4, 16), (64, 16, 1),
0), out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
        buf7 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_sqrt_sub_var_0[grid(64)](buf2,
buf6, buf0, primals_3, primals_4, buf7, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del primals_3
del primals_4
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf7, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_5, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf8)
del primals_6
return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf0, buf2, buf6, reinterpret_tensor(buf7, (64, 16), (16, 1), 0
), primals_5
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
        std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
class CoevolExtractorNew(nn.Module):
def __init__(self, n_feat_proj, n_feat_out, p_drop=0.1):
super(CoevolExtractorNew, self).__init__()
self.norm_2d = LayerNorm(n_feat_proj * n_feat_proj)
self.proj_2 = nn.Linear(n_feat_proj ** 2, n_feat_out)
def forward(self, input_0, input_1):
primals_3 = self.norm_2d.a_2
primals_4 = self.norm_2d.b_2
primals_5 = self.proj_2.weight
primals_6 = self.proj_2.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
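# Added commentary: the *New wrapper keeps the original module's parameters
# but routes forward() through the AOT-compiled call(), passing weights and
# inputs as the flat primals_* list; output[0] is the projected pair tensor,
# and the remaining returns are tensors saved for the backward pass.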
| wukevin/RoseTTAFold | CoevolExtractor | false | 4,559 | [
"MIT"
] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import torch
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super().__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
        std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
class Model(nn.Module):
def __init__(self, n_feat_proj, n_feat_out, p_drop=0.1):
super().__init__()
self.norm_2d = LayerNorm(n_feat_proj * n_feat_proj)
self.proj_2 = nn.Linear(n_feat_proj ** 2, n_feat_out)
def forward(self, x_down, x_down_w):
B, _N, L = x_down.shape[:3]
pair = torch.einsum('abij,ablm->ailjm', x_down, x_down_w)
pair = pair.reshape(B, L, L, -1)
pair = self.norm_2d(pair)
pair = self.proj_2(pair)
return pair
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DirectMultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/a3/ca3c423eelt6wuklohapyjrb6r2xhk3je6u3r7sngu36te4ofgnb.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pi/cpi2qgxxpr3iglvzzn3zoivoji4bc5wuycbhtkpuz55mte6rzjac.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x4 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
x2 = xindex % 4
y5 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x4) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x4 + (16*y5)), tmp8, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2s/c2s3zo6qtbodb6bdwv46ozxj4nxxymp76igm7emvdafvrj3673sn.py
# Topologically Sorted Source Nodes: [tgt_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# tgt_1 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_4,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5a/c5ainvzw5uinosmtjpfsqml2wdleittg5twoainmhbtwdqayr4sw.py
# Topologically Sorted Source Nodes: [tgt_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# tgt_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_8), kwargs = {})
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf2, primals_6, buf3, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_6
buf4 = reinterpret_tensor(buf2, (4, 1, 4, 4, 4), (64, 1, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf1, buf4, 16, 16, grid=grid(16, 16), stream=stream0)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt_1], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf5, buf6, 64, 4, grid=grid(64, 4), stream=stream0)
buf7 = reinterpret_tensor(buf5, (64, 4), (4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [tgt_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [tgt_1], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf8, primals_8, 256, grid=grid(256), stream=stream0)
del primals_8
return (buf8, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf6, (64, 4), (4, 1), 0), primals_7, reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class DirectMultiheadAttention(nn.Module):
def __init__(self, d_in, d_out, heads, dropout=0.1):
super(DirectMultiheadAttention, self).__init__()
self.heads = heads
self.proj_pair = nn.Linear(d_in, heads)
self.drop = nn.Dropout(dropout, inplace=True)
self.proj_msa = nn.Linear(d_out, d_out)
self.proj_out = nn.Linear(d_out, d_out)
def forward(self, src, tgt):
B, N, L = tgt.shape[:3]
attn_map = F.softmax(self.proj_pair(src), dim=1).permute(0, 3, 1, 2)
attn_map = self.drop(attn_map).unsqueeze(1)
value = self.proj_msa(tgt).permute(0, 3, 1, 2).contiguous().view(B,
-1, self.heads, N, L)
tgt = torch.matmul(value, attn_map).view(B, -1, N, L).permute(0, 2,
3, 1)
tgt = self.proj_out(tgt)
return tgt
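# Shape sketch (added commentary, assuming src is (B, L, L, d_in) and tgt is
# (B, N, L, d_out)): proj_pair + softmax(dim=1) + permute yield an attention
# map of shape (B, heads, L, L), unsqueezed to broadcast against the value
# tensor reshaped to (B, d_out/heads, heads, N, L); matmul mixes positions
# along the last axis before the output projection restores (B, N, L, d_out).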
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_in': 4, 'd_out': 4, 'heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x4 = xindex
y0 = yindex % 4
y1 = yindex // 4
x2 = xindex % 4
y5 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x4 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x4 + 16 * y5), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
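# Added commentary: this last kernel is the fused bias add for proj_out,
# broadcasting the 4-element bias (x0 = xindex % 4) over the flattened
# (4, 4, 4, 4) output tensor in place.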
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 16)](buf2, primals_6, buf3, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_6
buf4 = reinterpret_tensor(buf2, (4, 1, 4, 4, 4), (64, 1, 16, 4, 1), 0)
del buf2
triton_poi_fused_clone_2[grid(16, 16)](buf1, buf4, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(64, 4)](buf5, buf6, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (64, 4), (4, 1), 0)
del buf5
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused_add_4[grid(256)](buf8, primals_8, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_8
return buf8, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf6, (64, 4), (4, 1), 0
), primals_7, reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0)
class DirectMultiheadAttentionNew(nn.Module):
def __init__(self, d_in, d_out, heads, dropout=0.1):
super(DirectMultiheadAttentionNew, self).__init__()
self.heads = heads
self.proj_pair = nn.Linear(d_in, heads)
self.drop = nn.Dropout(dropout, inplace=True)
self.proj_msa = nn.Linear(d_out, d_out)
self.proj_out = nn.Linear(d_out, d_out)
def forward(self, input_0, input_1):
primals_2 = self.proj_pair.weight
primals_3 = self.proj_pair.bias
primals_5 = self.proj_msa.weight
primals_6 = self.proj_msa.bias
primals_7 = self.proj_out.weight
primals_8 = self.proj_out.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| wukevin/RoseTTAFold | DirectMultiheadAttention | false | 4,560 | [
"MIT"
] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, d_in, d_out, heads, dropout=0.1):
super().__init__()
self.heads = heads
self.proj_pair = nn.Linear(d_in, heads)
self.drop = nn.Dropout(dropout, inplace=True)
self.proj_msa = nn.Linear(d_out, d_out)
self.proj_out = nn.Linear(d_out, d_out)
def forward(self, src, tgt):
B, N, L = tgt.shape[:3]
attn_map = F.softmax(self.proj_pair(src), dim=1).permute(0, 3, 1, 2)
attn_map = self.drop(attn_map).unsqueeze(1)
value = self.proj_msa(tgt).permute(0, 3, 1, 2).contiguous().view(B,
-1, self.heads, N, L)
tgt = torch.matmul(value, attn_map).view(B, -1, N, L).permute(0, 2,
3, 1)
tgt = self.proj_out(tgt)
return tgt
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
PVABlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3u/c3ub52l73zdv4klgqzgxmtzrzxvztuyczv2jksnvrjr7erq7guxd.py
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# linear_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2r/c2rfym7x5d6gfe43hgp23gbkfnsow7ttgn2hby3xsrg6fgja77us.py
# Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# tanh => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %unsqueeze), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_1 = async_compile.triton('triton_poi_fused_add_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x2 = (xindex // 8192) % 4
x3 = (xindex // 32768)
x4 = xindex % 8192
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x2)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4 + (8192*x3)), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + (x5), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/g2/cg266bzrczdgrg4t2bizcm563ududawebjvz37s7yg2panbw22xg.py
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# att_1 => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 512), (512, 1))
assert_size_stride(primals_3, (512, 512), (512, 1))
assert_size_stride(primals_4, (512, 4), (4, 1))
assert_size_stride(primals_5, (1, 512), (512, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, reinterpret_tensor(primals_3, (512, 512), (1, 512), 0), out=buf0)
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf1, 64, 4, grid=grid(64, 4), stream=stream0)
buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 512), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 16, 512), (32768, 8192, 512, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh]
triton_poi_fused_add_tanh_1.run(buf0, buf2, buf3, 131072, grid=grid(131072), stream=stream0)
del buf0
del buf2
buf4 = empty_strided_cuda((256, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (256, 512), (512, 1), 0), reinterpret_tensor(primals_5, (512, 1), (1, 512), 0), out=buf4)
buf7 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
triton_per_fused__softmax_2.run(buf4, buf7, 16, 16, grid=grid(16), stream=stream0)
del buf4
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), out=buf8)
return (buf8, primals_2, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, buf7, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), primals_5, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((512, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 512), (512, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def constant_init(module, val, bias=0):
nn.init.constant_(module.weight, val)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def kaiming_init(module, a=0, is_rnn=False, mode='fan_in', nonlinearity=
'leaky_relu', bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
if is_rnn:
for name, param in module.named_parameters():
if 'bias' in name:
nn.init.constant_(param, bias)
elif 'weight' in name:
nn.init.kaiming_uniform_(param, a=a, mode=mode,
nonlinearity=nonlinearity)
else:
nn.init.kaiming_uniform_(module.weight, a=a, mode=mode,
nonlinearity=nonlinearity)
elif is_rnn:
for name, param in module.named_parameters():
if 'bias' in name:
nn.init.constant_(param, bias)
elif 'weight' in name:
                nn.init.kaiming_normal_(param, a=a, mode=mode,
                    nonlinearity=nonlinearity)
else:
        nn.init.kaiming_normal_(module.weight, a=a, mode=mode,
            nonlinearity=nonlinearity)
if not is_rnn and hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def xavier_init(module, gain=1, bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
nn.init.xavier_uniform_(module.weight, gain=gain)
else:
nn.init.xavier_normal_(module.weight, gain=gain)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def init_weights(modules):
for m in modules:
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
xavier_init(m)
elif isinstance(m, (nn.LSTM, nn.LSTMCell)):
kaiming_init(m, is_rnn=True)
class PVABlock(nn.Module):
def __init__(self, num_steps, in_channels, embedding_channels=512,
inner_channels=512):
super(PVABlock, self).__init__()
self.num_steps = num_steps
self.in_channels = in_channels
self.inner_channels = inner_channels
self.embedding_channels = embedding_channels
self.order_embeddings = nn.Parameter(torch.randn(self.num_steps,
self.embedding_channels), requires_grad=True)
self.v_linear = nn.Linear(self.in_channels, self.inner_channels,
bias=False)
        self.o_linear = nn.Linear(self.embedding_channels,
            self.inner_channels, bias=False)
self.e_linear = nn.Linear(self.inner_channels, 1, bias=False)
init_weights(self.modules())
def forward(self, x):
b, c, h, w = x.size()
x = x.reshape(b, c, h * w).permute(0, 2, 1)
o_out = self.o_linear(self.order_embeddings).view(1, self.num_steps,
1, self.inner_channels)
v_out = self.v_linear(x).unsqueeze(1)
att = self.e_linear(torch.tanh(o_out + v_out)).squeeze(3)
att = torch.softmax(att, dim=2)
out = torch.bmm(att, x)
return out
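# Added commentary: this is additive (Bahdanau-style) attention driven by the
# learned order embeddings: scores e_linear(tanh(o_out + v_out)) are softmaxed
# over the h*w spatial positions (dim=2), and bmm pools the flattened features
# into one vector per decoding step, giving an output of shape (b, num_steps, c).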
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_steps': 4, 'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 512
x2 = xindex // 8192 % 4
x3 = xindex // 32768
x4 = xindex % 8192
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x2), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x4 + 8192 * x3), None, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + x5, tmp3, None)
@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
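# Added commentary: a numerically stable row softmax over 16 positions --
# subtract the row max before exp, then normalize by the row sum; masked
# lanes contribute -inf to the max reduction and 0 to the sum.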
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 512), (512, 1))
assert_size_stride(primals_3, (512, 512), (512, 1))
assert_size_stride(primals_4, (512, 4), (4, 1))
assert_size_stride(primals_5, (1, 512), (512, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_3, (512,
512), (1, 512), 0), out=buf0)
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf1, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 512), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 16, 512), (32768, 8192, 512, 1),
torch.float32)
triton_poi_fused_add_tanh_1[grid(131072)](buf0, buf2, buf3, 131072,
XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del buf2
buf4 = empty_strided_cuda((256, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (256, 512), (512, 1), 0),
reinterpret_tensor(primals_5, (512, 1), (1, 512), 0), out=buf4)
buf7 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_2[grid(16)](buf4, buf7, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf4
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(primals_1, (4, 16, 4),
(64, 1, 16), 0), out=buf8)
return buf8, primals_2, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf3, buf7, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0
), primals_5, primals_3
def constant_init(module, val, bias=0):
nn.init.constant_(module.weight, val)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def kaiming_init(module, a=0, is_rnn=False, mode='fan_in', nonlinearity=
'leaky_relu', bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
if is_rnn:
for name, param in module.named_parameters():
if 'bias' in name:
nn.init.constant_(param, bias)
elif 'weight' in name:
nn.init.kaiming_uniform_(param, a=a, mode=mode,
nonlinearity=nonlinearity)
else:
nn.init.kaiming_uniform_(module.weight, a=a, mode=mode,
nonlinearity=nonlinearity)
elif is_rnn:
for name, param in module.named_parameters():
if 'bias' in name:
nn.init.constant_(param, bias)
elif 'weight' in name:
                nn.init.kaiming_normal_(param, a=a, mode=mode,
                    nonlinearity=nonlinearity)
else:
        nn.init.kaiming_normal_(module.weight, a=a, mode=mode,
            nonlinearity=nonlinearity)
if not is_rnn and hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def xavier_init(module, gain=1, bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
nn.init.xavier_uniform_(module.weight, gain=gain)
else:
nn.init.xavier_normal_(module.weight, gain=gain)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def init_weights(modules):
for m in modules:
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
xavier_init(m)
elif isinstance(m, (nn.LSTM, nn.LSTMCell)):
kaiming_init(m, is_rnn=True)
class PVABlockNew(nn.Module):
def __init__(self, num_steps, in_channels, embedding_channels=512,
inner_channels=512):
super(PVABlockNew, self).__init__()
self.num_steps = num_steps
self.in_channels = in_channels
self.inner_channels = inner_channels
self.embedding_channels = embedding_channels
self.order_embeddings = nn.Parameter(torch.randn(self.num_steps,
self.embedding_channels), requires_grad=True)
self.v_linear = nn.Linear(self.in_channels, self.inner_channels,
bias=False)
        self.o_linear = nn.Linear(self.embedding_channels,
            self.inner_channels, bias=False)
self.e_linear = nn.Linear(self.inner_channels, 1, bias=False)
init_weights(self.modules())
def forward(self, input_0):
primals_2 = self.order_embeddings
primals_4 = self.v_linear.weight
primals_3 = self.o_linear.weight
primals_5 = self.e_linear.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| wu0004in/vedastr | PVABlock | false | 4,561 | [
"Apache-2.0"
] | 0 | 83511a408b68c264561a30daff5154cd0148bebd | https://github.com/wu0004in/vedastr/tree/83511a408b68c264561a30daff5154cd0148bebd | import torch
import torch.nn as nn
def constant_init(module, val, bias=0):
nn.init.constant_(module.weight, val)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def kaiming_init(module, a=0, is_rnn=False, mode='fan_in', nonlinearity=
'leaky_relu', bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
if is_rnn:
for name, param in module.named_parameters():
if 'bias' in name:
nn.init.constant_(param, bias)
elif 'weight' in name:
nn.init.kaiming_uniform_(param, a=a, mode=mode,
nonlinearity=nonlinearity)
else:
nn.init.kaiming_uniform_(module.weight, a=a, mode=mode,
nonlinearity=nonlinearity)
elif is_rnn:
for name, param in module.named_parameters():
if 'bias' in name:
nn.init.constant_(param, bias)
elif 'weight' in name:
                nn.init.kaiming_normal_(param, a=a, mode=mode,
                    nonlinearity=nonlinearity)
else:
        nn.init.kaiming_normal_(module.weight, a=a, mode=mode,
            nonlinearity=nonlinearity)
if not is_rnn and hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def xavier_init(module, gain=1, bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if distribution == 'uniform':
nn.init.xavier_uniform_(module.weight, gain=gain)
else:
nn.init.xavier_normal_(module.weight, gain=gain)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def init_weights(modules):
for m in modules:
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
xavier_init(m)
elif isinstance(m, (nn.LSTM, nn.LSTMCell)):
kaiming_init(m, is_rnn=True)
class Model(nn.Module):
def __init__(self, num_steps, in_channels, embedding_channels=512,
inner_channels=512):
super().__init__()
self.num_steps = num_steps
self.in_channels = in_channels
self.inner_channels = inner_channels
self.embedding_channels = embedding_channels
self.order_embeddings = nn.Parameter(torch.randn(self.num_steps,
self.embedding_channels), requires_grad=True)
self.v_linear = nn.Linear(self.in_channels, self.inner_channels,
bias=False)
        self.o_linear = nn.Linear(self.embedding_channels,
            self.inner_channels, bias=False)
self.e_linear = nn.Linear(self.inner_channels, 1, bias=False)
init_weights(self.modules())
def forward(self, x):
b, c, h, w = x.size()
x = x.reshape(b, c, h * w).permute(0, 2, 1)
o_out = self.o_linear(self.order_embeddings).view(1, self.num_steps,
1, self.inner_channels)
v_out = self.v_linear(x).unsqueeze(1)
att = self.e_linear(torch.tanh(o_out + v_out)).squeeze(3)
att = torch.softmax(att, dim=2)
out = torch.bmm(att, x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
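# Hedged usage sketch (added for illustration; not part of the original
# repo): exercising Model with the shapes from get_inputs()/get_init_inputs().
# The expected output shape follows from forward(): attention of shape
# (b, num_steps, h*w) pooled against x gives (b, num_steps, in_channels).
def _example_pva_usage():
    model = Model(num_steps=4, in_channels=4)
    x = torch.rand([4, 4, 4, 4])
    out = model(x)
    assert out.shape == (4, 4, 4)
    return out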
|
MultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5j/c5jll3kxtd32cl7pwubrb5oky2mtzckfgip2xbwad7crvvp4zk4r.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kt/cktnex5febczl2ac6zugjmcksgsd5kjdufazv65vtepuwob3cb7a.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
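# Reference sketch (added for clarity; not emitted by Inductor): together,
# triton_poi_fused_1 and triton_poi_fused_2 implement a numerically stable
# row softmax over the last dim of the (4, 4, 4, 4) score tensor, with rows
# that are entirely -inf zeroed out, roughly:
#   m = scores.amax(-1, keepdim=True)
#   e = (scores - m).exp()                               # kernel 1
#   p = e / e.sum(-1, keepdim=True)                      # kernel 2 ...
#   p = p.masked_fill((scores == float('-inf')).all(-1, keepdim=True), 0.0)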
# kernel path: runs/run_shard_7/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
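# Reference note (added for clarity): triton_poi_fused_clone_4 materializes
# the permute(0, 2, 1, 3).contiguous() in the source forward(), merging the
# attention heads back into one feature dim before the output projection.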
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf5, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_8, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_11
return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiheadAttention(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super(MultiheadAttention, self).__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim, d_model)
self.to_value = nn.Linear(v_dim, d_model)
self.to_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value):
batch, L = query.shape[:2]
q = self.to_query(query).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
k = self.to_key(key).view(batch, L, self.heads, self.d_k).permute(0,
2, 1, 3)
v = self.to_value(value).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
attention = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
out = torch.matmul(attention, v)
out = out.permute(0, 2, 1, 3).contiguous().view(batch, L, -1)
out = self.to_out(out)
return out
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
        torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'heads': 4}]
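# Hedged usage sketch (added for illustration; not part of the original
# repo): running the eager module with the shapes from get_inputs() and
# get_init_inputs(); with d_model=4 and heads=4, d_k is 1.
def _example_mha_usage():
    mha = MultiheadAttention(d_model=4, heads=4)
    q = k = v = torch.rand([4, 4, 4])
    out = mha(q, k, v)
    assert out.shape == (4, 4, 4)
    return out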
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask,
        eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask,
        eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + x2, xmask)
    tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask,
        eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask,
        eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask,
        eviction_policy='evict_last')
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
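# Reference note (added; not generated by Inductor): the kernels above plus
# the extern mm/bmm calls in call() below reproduce the eager forward pass:
# three input projections, scaled q @ k^T, a stable softmax with an all -inf
# row guard, the attention-weighted sum over v, and the final output addmm.
# Dropout does not appear in the fused graph, presumably because the module
# was traced with dropout inactive (eval-mode behavior).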
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf8, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_8
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_11, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_11
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_10
class MultiheadAttentionNew(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super(MultiheadAttentionNew, self).__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim, d_model)
self.to_value = nn.Linear(v_dim, d_model)
self.to_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0, input_1, input_2):
primals_2 = self.to_query.weight
primals_3 = self.to_query.bias
primals_4 = self.to_key.weight
primals_5 = self.to_key.bias
primals_7 = self.to_value.weight
primals_8 = self.to_value.bias
primals_10 = self.to_out.weight
primals_11 = self.to_out.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
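# Hedged usage sketch (added for illustration): the compiled wrapper expects
# CUDA tensors, since call() pins device 0 and launches Triton kernels.
def _example_compiled_usage():
    m = MultiheadAttentionNew(d_model=4, heads=4).cuda()
    q = k = v = torch.rand([4, 4, 4], device='cuda')
    out = m(q, k, v)
    assert out.shape == (4, 4, 4)
    return out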
| wukevin/RoseTTAFold | MultiheadAttention | false | 4,562 | [
"MIT"
] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super().__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim, d_model)
self.to_value = nn.Linear(v_dim, d_model)
self.to_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value):
batch, L = query.shape[:2]
q = self.to_query(query).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
k = self.to_key(key).view(batch, L, self.heads, self.d_k).permute(0,
2, 1, 3)
v = self.to_value(value).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
attention = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
out = torch.matmul(attention, v)
out = out.permute(0, 2, 1, 3).contiguous().view(batch, L, -1)
out = self.to_out(out)
return out
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
        torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
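# Hedged shape walk-through (added for illustration): with d_model=4 and
# heads=4, d_k = 1, so q/k/v become (batch=4, heads=4, L=4, d_k=1), the
# attention matrix is (4, 4, 4, 4), and the weighted sum restores (4, 4, 4).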
|
MaskedDirectMultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/72/c72xtjgr6nvxzysrosrcqo53ibrppbb7vty5aypjwf5wncc3t4ja.py
# Topologically Sorted Source Nodes: [q_1, attention], Original ATen: [aten.mul, aten.clone]
# Source node to ATen node mapping:
# attention => clone
# q_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_1, 0.17677669529663687), kwargs = {})
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_mul_0 = async_compile.triton('triton_poi_fused_clone_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128) % 4
x2 = (xindex // 512) % 4
x3 = (xindex // 2048)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x2) + (512*x1) + (2048*x3)), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.17677669529663687
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x4), tmp4, None)
''', device_str='cuda')
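# Reference note (added): 0.17677669529663687 == 1/sqrt(32), presumably the
# module's 1/sqrt(d_k) query scaling, fused here with the bias add of the
# query projection (in_ptr1 broadcasts the 128-wide bias).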
# kernel path: runs/run_shard_7/inductor_cache/mf/cmfkdxaa2qcclyatyf7htbhhyc2ulc7gs7yhktl27b7px42fmnvo.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attention => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y2 = (yindex // 512)
y4 = yindex % 512
y0 = yindex % 128
y5 = yindex
tmp0 = tl.load(in_ptr0 + (y4 + (512*x3) + (2048*y2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x3 + (4*y5)), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xm/cxm36pdymyhtxbusb3hoz5tg4kfpa7um5wycqczyzntysjohm32r.py
# Topologically Sorted Source Nodes: [lt], Original ATen: [aten.lt]
# Source node to ATen node mapping:
# lt => lt
# Graph fragment:
# %lt : [num_users=2] = call_function[target=torch.ops.aten.lt.Scalar](args = (%primals_10, 0.5), kwargs = {})
triton_poi_fused_lt_2 = async_compile.triton('triton_poi_fused_lt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_lt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_lt_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 < tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ww/cwwfncbvrsyih3rporgaoo5bbz3rdvrx4v2kwc5wi6ch75x4fnxd.py
# Topologically Sorted Source Nodes: [attention_1, attention_2], Original ATen: [aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# attention_1 => full_default, where
# attention_2 => amax, exp, sub, sum_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -3.4028234663852886e+38), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%lt, %full_default, %view_11), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_masked_fill_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_masked_fill_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp5 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp13 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = -3.4028234663852886e+38
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp6 = tl.where(tmp4, tmp2, tmp5)
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp10 = tl.where(tmp8, tmp2, tmp9)
tmp11 = triton_helpers.maximum(tmp7, tmp10)
tmp14 = tl.where(tmp12, tmp2, tmp13)
tmp15 = triton_helpers.maximum(tmp11, tmp14)
tmp16 = tmp3 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp6 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp10 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + (x0), tmp15, xmask)
tl.store(out_ptr1 + (x0), tmp26, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/id/cidf3gczoc5743ifzxfd7xwukbraxiqnfxx2b7lyx2nj22mwh33v.py
# Topologically Sorted Source Nodes: [attention_1, attention_2], Original ATen: [aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# attention_1 => full_default, where
# attention_2 => amax, div, exp, sub
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -3.4028234663852886e+38), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%lt, %full_default, %view_11), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_masked_fill_4 = async_compile.triton('triton_poi_fused__softmax_masked_fill_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_masked_fill_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = -3.4028234663852886e+38
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
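# Reference sketch (added for clarity; not emitted by Inductor):
# triton_poi_fused_lt_2 precomputes mask < 0.5, and the pair
# triton_poi_fused__softmax_masked_fill_3/_4 then fuses the masked softmax
#   att = att.masked_fill(mask < 0.5, -3.4028234663852886e+38)
#   att = att.softmax(dim=-1)
# with kernel 3 producing the per-row max and exp-sum and kernel 4
# normalizing in place.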
# kernel path: runs/run_shard_7/inductor_cache/lf/clfz7xrqgyz5iicmre3secjcsevj4hcbkh22re5a7kfzfgtu5a3w.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_10,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = (xindex // 4)
y0 = yindex % 4
y1 = (yindex // 4)
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x3) + (16*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x5 + (16*y4)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sg/csgcdnzirukvuhbipfao25ehmm65qit3rhi44eq6cj2rfu5gmxey.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_12,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = (xindex // 4)
y0 = yindex % 4
y1 = (yindex // 4)
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x3) + (16*x2) + (64*y1)), xmask & ymask)
tl.store(out_ptr0 + (x5 + (16*y4)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 4), (4, 1))
assert_size_stride(primals_3, (128, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (128, 4), (4, 1))
assert_size_stride(primals_6, (128, ), (1, ))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 128), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 128), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_1, attention], Original ATen: [aten.mul, aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_mul_0.run(buf0, primals_3, buf3, 8192, grid=grid(8192), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 128, 4), (2048, 512, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, primals_6, buf4, 2048, 4, grid=grid(2048, 4), stream=stream0)
del buf1
del primals_6
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 128), (512, 128, 1), 0), reinterpret_tensor(buf4, (16, 128, 4), (512, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [lt], Original ATen: [aten.lt]
triton_poi_fused_lt_2.run(primals_10, buf6, 256, grid=grid(256), stream=stream0)
del primals_10
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attention_1, attention_2], Original ATen: [aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_masked_fill_3.run(buf6, buf5, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [attention_1, attention_2], Original ATen: [aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_masked_fill_4.run(buf9, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0)
del buf7
del buf8
buf10 = empty_strided_cuda((4, 4, 4, 4, 1, 1), (64, 16, 4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf2, primals_9, buf10, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_9
buf11 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf11, buf12, 16, 16, grid=grid(16, 16), stream=stream0)
buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_12
return (reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf6, buf9, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_11, reinterpret_tensor(buf10, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 128, 4), (512, 1, 128), 0), reinterpret_tensor(buf4, (16, 4, 128), (512, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class MaskedDirectMultiheadAttention(nn.Module):
def __init__(self, d_in, d_out, heads, d_k=32, dropout=0.1):
super(MaskedDirectMultiheadAttention, self).__init__()
self.heads = heads
self.scaling = 1 / math.sqrt(d_k)
self.to_query = nn.Linear(d_in, heads * d_k)
self.to_key = nn.Linear(d_in, heads * d_k)
self.to_value = nn.Linear(d_out, d_out)
self.to_out = nn.Linear(d_out, d_out)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, query, key, value, mask):
batch, N, L = value.shape[:3]
q = self.to_query(query).view(batch, L, self.heads, -1).permute(0,
2, 1, 3)
k = self.to_key(key).view(batch, L, self.heads, -1).permute(0, 2, 1, 3)
v = self.to_value(value).view(batch, N, L, self.heads, -1).permute(
0, 3, 1, 2, 4)
q = q * self.scaling
attention = torch.matmul(q, k.transpose(-2, -1))
attention = attention.masked_fill(mask < 0.5, torch.finfo(q.dtype).min)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
out = torch.einsum('bhij,bhnjk->bhnik', attention, v)
out = out.permute(0, 2, 3, 1, 4).contiguous().view(batch, N, L, -1)
out = self.to_out(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_in': 4, 'd_out': 4, 'heads': 4}]
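# Minimal usage sketch, assuming the toy shapes from get_inputs() /
# get_init_inputs() above; the mask convention follows
# masked_fill(mask < 0.5, ...), so entries >= 0.5 are attended to.
if __name__ == "__main__":
    mha = MaskedDirectMultiheadAttention(d_in=4, d_out=4, heads=4)
    query, key, value, mask = get_inputs()
    out = mha(query, key, value, mask)
    assert out.shape == (4, 4, 4, 4)  # (batch, N, L, d_out)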
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
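# xnumel (8192) divides evenly by the launch XBLOCK, so Inductor emits no
# bounds mask; the tl.full(...) below is its all-true placeholder.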
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 4
x2 = xindex // 512 % 4
x3 = xindex // 2048
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x2 + 512 * x1 + 2048 * x3), None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
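# fused bias add + query scaling: 0.17677669529663687 == 1/sqrt(32),
# i.e. self.scaling with the default d_k=32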
tmp3 = 0.17677669529663687
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, None)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y2 = yindex // 512
y4 = yindex % 512
y0 = yindex % 128
y5 = yindex
tmp0 = tl.load(in_ptr0 + (y4 + 512 * x3 + 2048 * y2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x3 + 4 * y5), tmp2, xmask)
@triton.jit
def triton_poi_fused_lt_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
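# builds the boolean mask for masked_fill: True wherever mask < 0.5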
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 < tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_masked_fill_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
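# first softmax pass over each 4-wide row: apply the mask, then emit the
# row max (out_ptr0) and the sum of exp(x - max) (out_ptr1)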
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp13 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
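# torch.finfo(torch.float32).min, the fill value for masked positions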
tmp2 = -3.4028234663852886e+38
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp6 = tl.where(tmp4, tmp2, tmp5)
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp10 = tl.where(tmp8, tmp2, tmp9)
tmp11 = triton_helpers.maximum(tmp7, tmp10)
tmp14 = tl.where(tmp12, tmp2, tmp13)
tmp15 = triton_helpers.maximum(tmp11, tmp14)
tmp16 = tmp3 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp6 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp10 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + x0, tmp15, xmask)
tl.store(out_ptr1 + x0, tmp26, xmask)
@triton.jit
def triton_poi_fused__softmax_masked_fill_4(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
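# second pass: re-apply the masked fill, then normalize with the
# precomputed row max (in_ptr1) and row sum (in_ptr2)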
tmp2 = -3.4028234663852886e+38
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = xindex // 4
y0 = yindex % 4
y1 = yindex // 4
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x5 + 16 * y4), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = xindex // 4
y0 = yindex % 4
y1 = yindex // 4
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (128, 4), (4, 1))
assert_size_stride(primals_3, (128,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (128, 4), (4, 1))
assert_size_stride(primals_6, (128,), (1,))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 128), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_7, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 128), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_mul_0[grid(8192)](buf0, primals_3, buf3,
8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 128, 4), (2048, 512, 4, 1), 0)
del buf0
triton_poi_fused_clone_1[grid(2048, 4)](buf1, primals_6, buf4, 2048,
4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del buf1
del primals_6
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 128), (512, 128,
1), 0), reinterpret_tensor(buf4, (16, 128, 4), (512, 4, 1), 0),
out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_lt_2[grid(256)](primals_10, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_10
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_masked_fill_3[grid(64)](buf6, buf5, buf7,
buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_masked_fill_4[grid(256)](buf9, buf6, buf7,
buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf7
del buf8
buf10 = empty_strided_cuda((4, 4, 4, 4, 1, 1), (64, 16, 4, 1, 1, 1),
torch.float32)
triton_poi_fused_clone_5[grid(16, 16)](buf2, primals_9, buf10, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_9
buf11 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), out=buf11
)
buf12 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1),
torch.float32)
triton_poi_fused_clone_6[grid(16, 16)](buf11, buf12, 16, 16, XBLOCK
=16, YBLOCK=16, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_12
return reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_7, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf6, buf9, reinterpret_tensor(buf12, (64, 4), (4, 1), 0
), primals_11, reinterpret_tensor(buf10, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (16, 128, 4), (512, 1, 128), 0
), reinterpret_tensor(buf4, (16, 4, 128), (512, 1, 4), 0)
class MaskedDirectMultiheadAttentionNew(nn.Module):
def __init__(self, d_in, d_out, heads, d_k=32, dropout=0.1):
super(MaskedDirectMultiheadAttentionNew, self).__init__()
self.heads = heads
self.scaling = 1 / math.sqrt(d_k)
self.to_query = nn.Linear(d_in, heads * d_k)
self.to_key = nn.Linear(d_in, heads * d_k)
self.to_value = nn.Linear(d_out, d_out)
self.to_out = nn.Linear(d_out, d_out)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, input_0, input_1, input_2, input_3):
primals_2 = self.to_query.weight
primals_3 = self.to_query.bias
primals_5 = self.to_key.weight
primals_6 = self.to_key.bias
primals_8 = self.to_value.weight
primals_9 = self.to_value.bias
primals_11 = self.to_out.weight
primals_12 = self.to_out.bias
primals_1 = input_0
primals_4 = input_1
primals_7 = input_2
primals_10 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| wukevin/RoseTTAFold | MaskedDirectMultiheadAttention | false | 4,563 | [
"MIT"
] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, d_in, d_out, heads, d_k=32, dropout=0.1):
super().__init__()
self.heads = heads
self.scaling = 1 / math.sqrt(d_k)
self.to_query = nn.Linear(d_in, heads * d_k)
self.to_key = nn.Linear(d_in, heads * d_k)
self.to_value = nn.Linear(d_out, d_out)
self.to_out = nn.Linear(d_out, d_out)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, query, key, value, mask):
batch, N, L = value.shape[:3]
q = self.to_query(query).view(batch, L, self.heads, -1).permute(0,
2, 1, 3)
k = self.to_key(key).view(batch, L, self.heads, -1).permute(0, 2, 1, 3)
v = self.to_value(value).view(batch, N, L, self.heads, -1).permute(
0, 3, 1, 2, 4)
q = q * self.scaling
attention = torch.matmul(q, k.transpose(-2, -1))
attention = attention.masked_fill(mask < 0.5, torch.finfo(q.dtype).min)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
out = torch.einsum('bhij,bhnjk->bhnik', attention, v)
out = out.permute(0, 2, 3, 1, 4).contiguous().view(batch, N, L, -1)
out = self.to_out(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
SequenceWeight | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# linear => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%unsqueeze,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ah/cahcbdgzcypclgrmenrcgftl53kemvcm53v6yoxzwdqjyblrincb.py
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# linear_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ad/cadtm4lcte6qs5ulex4r4w7d3wtldz4zzb4rjgsklp6cmsyteojd.py
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# q_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, 1.0), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rd/crdh5ujmhpdosgpl5dfckxnbrxrnmwgjpsx4glojxh4cleu4s6vu.py
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# k => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_4,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/iv/civdgwzpphyda4rs4fr3g6w25bprv7bn4anqgivrgzavi7xr5pdl.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_8, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_8, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_1, buf2, 256, grid=grid(256), stream=stream0)
del primals_1
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
del primals_4
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf4, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf5 = empty_strided_cuda((4, 4, 4, 1, 4), (64, 16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf3, primals_5, buf5, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_5
buf6 = reinterpret_tensor(buf3, (64, 1, 4), (4, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (64, 1, 1), (1, 0, 0), 0), reinterpret_tensor(buf5, (64, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 1, 4), (64, 16, 4, 256, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 1, 4), (64, 16, 4, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_5.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
del buf7
return (buf8, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf4, (64, 1, 1), (1, 1, 1), 0), reinterpret_tensor(buf5, (64, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class SequenceWeight(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super(SequenceWeight, self).__init__()
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.scale = 1.0 / math.sqrt(self.d_k)
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, msa):
B, N, L = msa.shape[:3]
msa = msa.permute(0, 2, 1, 3)
tar_seq = msa[:, :, 0].unsqueeze(2)
q = self.to_query(tar_seq).view(B, L, 1, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
k = self.to_key(msa).view(B, L, N, self.heads, self.d_k).permute(0,
1, 3, 4, 2).contiguous()
q = q * self.scale
attn = torch.matmul(q, k)
attn = F.softmax(attn, dim=-1)
return self.dropout(attn)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'heads': 4}]
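# Minimal usage sketch, assuming the toy shapes above; eval() disables the
# inplace dropout so each attention row stays normalized to 1 over the N
# sequences.
if __name__ == "__main__":
    sw = SequenceWeight(d_model=4, heads=4).eval()
    attn = sw(torch.rand(4, 4, 4, 4))  # msa: (B, N, L, d_model)
    assert attn.shape == (4, 4, 4, 1, 4)  # (B, L, heads, 1, N)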
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
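# gathers the target sequence msa[:, 0] (the first 16 floats of each batch
# item) into a contiguous (B, L, 1, d_model) buffer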
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
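# scale = 1/sqrt(d_k) = 1.0 here, since d_k = d_model // heads = 4 // 4 = 1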
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
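# fused bias add while laying k out as (B, L, heads, d_k, N) for the
# attention matmul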
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
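# pass 1 of a numerically stable softmax over the last dim (size 4):
# exp(x - row max)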
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
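# pass 2: divide each exponential by its row sum to finish the softmax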
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](primals_1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_1
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
del primals_4
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0)
del buf1
triton_poi_fused_mul_2[grid(64)](buf4, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((4, 4, 4, 1, 4), (64, 16, 4, 4, 1), torch
.float32)
triton_poi_fused_clone_3[grid(64, 4)](buf3, primals_5, buf5, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_5
buf6 = reinterpret_tensor(buf3, (64, 1, 4), (4, 4, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf4, (64, 1, 1), (1, 0, 0),
0), reinterpret_tensor(buf5, (64, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 1, 4), (64, 16, 4, 256, 1),
torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 1, 4), (64, 16, 4, 4, 1), 0)
del buf6
triton_poi_fused__softmax_5[grid(256)](buf7, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf7
return buf8, reinterpret_tensor(buf0, (16, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf4, (64, 1, 1), (1, 1, 1), 0
), reinterpret_tensor(buf5, (64, 4, 1), (4, 1, 4), 0)
class SequenceWeightNew(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super(SequenceWeightNew, self).__init__()
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.scale = 1.0 / math.sqrt(self.d_k)
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, input_0):
primals_2 = self.to_query.weight
primals_3 = self.to_query.bias
primals_4 = self.to_key.weight
primals_5 = self.to_key.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| wukevin/RoseTTAFold | SequenceWeight | false | 4,564 | [
"MIT"
] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.scale = 1.0 / math.sqrt(self.d_k)
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, msa):
B, N, L = msa.shape[:3]
msa = msa.permute(0, 2, 1, 3)
tar_seq = msa[:, :, 0].unsqueeze(2)
q = self.to_query(tar_seq).view(B, L, 1, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
k = self.to_key(msa).view(B, L, N, self.heads, self.d_k).permute(0,
1, 3, 4, 2).contiguous()
q = q * self.scale
attn = torch.matmul(q, k)
attn = F.softmax(attn, dim=-1)
return self.dropout(attn)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MFM2_1 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/pv/cpv3dpswnezwozpz7mrpzlo7hs65vm7hcy33ktshkygqecqb4wfq.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {})
triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_max_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class MFM2_1(torch.nn.Module):
"""Max-Feature-Map (MFM) 2/1 operation. """
def forward(self, input):
input = input.reshape((input.shape[0], 2, -1, *input.shape[2:]))
output = input.max(dim=1)[0]
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
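# Minimal usage sketch, assuming an even channel count: MFM 2/1 splits C into
# two halves and keeps their elementwise maximum, halving the channel dim.
if __name__ == "__main__":
    y = MFM2_1()(torch.rand(4, 4, 4, 4))  # (N, C, H, W)
    assert y.shape == (4, 2, 4, 4)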
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
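# x0 addresses a position inside one channel half (C/2 * H * W = 32 floats
# per batch item); the two loads fetch the matching element from each half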
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MFM2_1New(torch.nn.Module):
"""Max-Feature-Map (MFM) 2/1 operation. """
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| x6rulin/TP-GAN | MFM2_1 | false | 4,565 | [
"MIT"
] | 0 | 1716cf06aaff8a6a2cee2548ec662dcdd68c0449 | https://github.com/x6rulin/TP-GAN/tree/1716cf06aaff8a6a2cee2548ec662dcdd68c0449 | import torch
class Model(torch.nn.Module):
"""Max-Feature-Map (MFM) 2/1 operation. """
def forward(self, input):
input = input.reshape((input.shape[0], 2, -1, *input.shape[2:]))
output = input.max(dim=1)[0]
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Spatial_Attention_layer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/eh/ceheq5ns3kg3p6tebb47gdy475c5v4keklf245jl4fgbpvugznm5.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
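# Note (added): the 1/sqrt(in_channels) scaling of the attention scores
# (in_channels = 4, so a factor of 1/2) is folded into the numerically stable
# softmax as the division by 2.0 applied after subtracting the row maximum.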
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
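    # tmp3..tmp11 reload all four entries of row x1 so the row maximum can be
    # formed inline; with N = 4 the reduction is cheap enough to unroll rather
    # than run as a separate reduction kernel.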
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
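    # Second softmax pass: re-read the four exponentials of row x1, accumulate
    # their sum, and divide each element by it to normalize the row.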
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0), out=buf1)
buf2 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf2
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Spatial_Attention_layer(nn.Module):
"""
    Compute spatial attention scores.
"""
def __init__(self, dropout=0.0):
super(Spatial_Attention_layer, self).__init__()
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
"""
:param x: (batch_size, N, T, F_in)
:return: (batch_size, T, N, N)
"""
batch_size, num_of_vertices, num_of_timesteps, in_channels = x.shape
x = x.permute(0, 2, 1, 3).reshape((-1, num_of_vertices, in_channels))
score = torch.matmul(x, x.transpose(1, 2)) / math.sqrt(in_channels)
score = self.dropout(F.softmax(score, dim=-1))
return score.reshape((batch_size, num_of_timesteps, num_of_vertices,
num_of_vertices))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
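# Minimal sanity check (added sketch, eager-mode only): attention rows come
# from a softmax, so each row of the returned (B, T, N, N) tensor must sum to 1.
if __name__ == "__main__":
    _layer = Spatial_Attention_layer()
    _score = _layer(torch.rand(4, 4, 4, 4))
    assert _score.shape == (4, 4, 4, 4)
    assert torch.allclose(_score.sum(dim=-1), torch.ones(4, 4, 4))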
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0), out=buf1)
buf2 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf2
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
class Spatial_Attention_layerNew(nn.Module):
"""
    Compute spatial attention scores.
"""
def __init__(self, dropout=0.0):
super(Spatial_Attention_layerNew, self).__init__()
self.dropout = nn.Dropout(p=dropout)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| wxh453751461/Gformer | Spatial_Attention_layer | false | 4,566 | [
"Apache-2.0"
] | 0 | a033eb6fce59ceacc61a76430010805023ac230f | https://github.com/wxh453751461/Gformer/tree/a033eb6fce59ceacc61a76430010805023ac230f | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Model(nn.Module):
"""
    Compute spatial attention scores.
"""
def __init__(self, dropout=0.0):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
"""
:param x: (batch_size, N, T, F_in)
:return: (batch_size, T, N, N)
"""
batch_size, num_of_vertices, num_of_timesteps, in_channels = x.shape
x = x.permute(0, 2, 1, 3).reshape((-1, num_of_vertices, in_channels))
score = torch.matmul(x, x.transpose(1, 2)) / math.sqrt(in_channels)
score = self.dropout(F.softmax(score, dim=-1))
return score.reshape((batch_size, num_of_timesteps, num_of_vertices,
num_of_vertices))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SubpixelConvolutionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/qv/cqvhvt2srxzlyjtv7yvbl72icnvz67gwv5qk2a6g6brxq6eqjycx.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_2 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 8
x5 = (xindex // 64)
x2 = (xindex // 64) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + ((4*(x1 // 2)) + (16*(x0 % 2)) + (32*(x1 % 2)) + (64*x5) + (x0 // 2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((2*(x1 % 2)) + (4*x2) + (x0 % 2)), xmask, eviction_policy='evict_last')
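    # Index note (added): this fuses PixelShuffle(2). Output pixel (x1, x0) of
    # the 8x8 map reads input sub-channel 2*(x1 % 2) + (x0 % 2) at input pixel
    # (x1 // 2, x0 // 2); tmp1 is the matching convolution bias term.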
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr1 + (x4), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf0, primals_2, buf1, buf2, 1024, grid=grid(1024), stream=stream0)
del buf0
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class SubpixelConvolutionLayer(nn.Module):
def __init__(self, channels: 'int') ->None:
"""
Args:
channels (int): Number of channels in the input image.
"""
super(SubpixelConvolutionLayer, self).__init__()
self.conv = nn.Conv2d(channels, channels * 4, 3, 1, 1)
self.pixel_shuffle = nn.PixelShuffle(2)
self.relu = nn.ReLU()
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
out = self.conv(x)
out = self.pixel_shuffle(out)
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
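# Usage sketch (added): the layer doubles spatial resolution, because the conv
# expands to 4*channels feature maps that PixelShuffle(2) rearranges into a
# 2x-upscaled map with the original channel count.
if __name__ == "__main__":
    _layer = SubpixelConvolutionLayer(channels=4)
    _y = _layer(torch.rand(4, 4, 4, 4))
    assert _y.shape == (4, 4, 8, 8)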
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x5 = xindex // 64
x2 = xindex // 64 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (x1 // 2) + 16 * (x0 % 2) + 32 * (x1 % 2) +
64 * x5 + x0 // 2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 * (x1 % 2) + 4 * x2 + x0 % 2), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (16, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 4, 4), (256, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf0,
primals_2, buf1, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf1, primals_1, primals_3, buf2
class SubpixelConvolutionLayerNew(nn.Module):
def __init__(self, channels: 'int') ->None:
"""
Args:
channels (int): Number of channels in the input image.
"""
super(SubpixelConvolutionLayerNew, self).__init__()
self.conv = nn.Conv2d(channels, channels * 4, 3, 1, 1)
self.pixel_shuffle = nn.PixelShuffle(2)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| wuyushuwys/SRGAN-PyTorch | SubpixelConvolutionLayer | false | 4,567 | [
"Apache-2.0"
] | 0 | 3a4aaaf7b55692264fca8451e4401466fcb1f39a | https://github.com/wuyushuwys/SRGAN-PyTorch/tree/3a4aaaf7b55692264fca8451e4401466fcb1f39a | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, channels: 'int') ->None:
"""
Args:
channels (int): Number of channels in the input image.
"""
super().__init__()
self.conv = nn.Conv2d(channels, channels * 4, 3, 1, 1)
self.pixel_shuffle = nn.PixelShuffle(2)
self.relu = nn.ReLU()
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
out = self.conv(x)
out = self.pixel_shuffle(out)
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Synthesis_net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/4n/c4nqo5grlrit5lmmzyvvr4b4jaxbu4da4u3recsuxjhj4z33ebee.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 61440
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = (yindex // 192)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (192*x2) + (4800*y1)), tmp0, xmask)
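    # Layout note (added): this prologue appears to repack a 5x5 (transposed-)
    # convolution weight so that a 192-wide channel dimension varies fastest
    # (blocks of 192 per filter tap), letting the downstream convolution
    # kernels read it contiguously.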
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rs/crschneg3kvye3kuwch7myvnxivcwiau22prtweju442z2v6tr7s.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1280
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = (yindex // 320)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (320*x2) + (5120*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ii/ciillaxyhgye3tvsrtkgjmpxnrkn67tpnzlawb2zuma4x3u5wdlo.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 36864
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = (yindex // 192)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (192*x2) + (4800*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kd/ckd3zxo3eoxbqovhr35s3zxwuhhw3ndu4jrxzfm7775qyiuynzfo.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 576
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (75*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k4/ck4v5mrrogvczni3qdwtiec3hzxl2msvobvw7cdbwfz2vw75wg7h.py
# Topologically Sorted Source Nodes: [conv_transpose2d, pow_3], Original ATen: [aten.convolution, aten.pow]
# Source node to ATen node mapping:
# conv_transpose2d => convolution
# pow_3 => pow_3
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {})
# %pow_3 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%convolution, 2), kwargs = {})
triton_poi_fused_convolution_pow_4 = async_compile.triton('triton_poi_fused_convolution_pow_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_pow_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_pow_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
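    # Fusion note (added): the transposed-conv bias add is combined with the
    # squaring that feeds the GDN normalization; the biased activation is
    # written back in place and its square goes to a separate scratch buffer.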
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + (x2), tmp2, None)
tl.store(out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6g/c6gbex743ouacies5yjrzwvxv4d6uicjzoy47ao3vs5ue4iej5kk.py
# Topologically Sorted Source Nodes: [gamma, pow_2, gamma_1], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub]
# Source node to ATen node mapping:
# gamma => full_default_1, maximum_1
# gamma_1 => sub_1
# pow_2 => pow_2
# Graph fragment:
# %full_default_1 : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([192, 192], 3.814697265625e-06), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_5, %full_default_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum_1, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, 1.4551915228366852e-11), kwargs = {})
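# Note (added): max(., 3.814697265625e-06)**2 - 1.4551915228366852e-11 matches
# the lower-bounded (I)GDN reparameterization: the bound is 2**-18 and the
# pedestal is 2**-36, so parameters stored as sqrt(gamma + pedestal) are
# clamped at the bound, squared, and the pedestal subtracted back out.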
triton_poi_fused_maximum_mul_pow_sub_5 = async_compile.triton('triton_poi_fused_maximum_mul_pow_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_maximum_mul_pow_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_maximum_mul_pow_sub_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 3.814697265625e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + (x0), tmp5, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ma/cmaubmvm6x5ray2d4ualpba23p4vufvwun5ed5sp2ksk5bd35xlb.py
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_, norm__1, outputs], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution, aten.sqrt]
# Source node to ATen node mapping:
# beta => full_default, maximum
# beta_1 => sub
# norm_ => convolution_1
# norm__1 => sqrt
# outputs => mul_2
# pow_1 => pow_1
# Graph fragment:
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([192], 0.0010000072652474046), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_4, %full_default), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, 1.4551915228366852e-11), kwargs = {})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%pow_3, %view, %sub, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%convolution_1,), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sqrt), kwargs = {})
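# Net effect (added): outputs = conv_out * sqrt(conv2d(conv_out**2, gamma,
# bias=beta)), i.e. an inverse-GDN nonlinearity applied to the
# transposed-convolution output.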
triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_6 = async_compile.triton('triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x2), None)
tmp2 = 0.0010000072652474046
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp3 * tmp3
tmp5 = 1.4551915228366852e-11
tmp6 = tmp4 - tmp5
tmp7 = tmp0 + tmp6
tmp9 = libdevice.sqrt(tmp7)
tmp10 = tmp8 * tmp9
tl.store(in_out_ptr0 + (x2), tmp7, None)
tl.store(out_ptr0 + (x2), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/x4/cx4ac643qojszmjtzwt6ozkxaamlordjcbqdqd6iouokmhwufttr.py
# Topologically Sorted Source Nodes: [conv_transpose2d_1, pow_6], Original ATen: [aten.convolution, aten.pow]
# Source node to ATen node mapping:
# conv_transpose2d_1 => convolution_2
# pow_6 => pow_6
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_2, %primals_6, %primals_7, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {})
# %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%convolution_2, 2), kwargs = {})
triton_poi_fused_convolution_pow_7 = async_compile.triton('triton_poi_fused_convolution_pow_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_pow_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_pow_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 196608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + (x2), tmp2, None)
tl.store(out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tb/ctb5lb7ljulmsfb6wsmeoaxme5qlnejo4wywpvrz76r4vl67djhz.py
# Topologically Sorted Source Nodes: [gamma, gamma_3, pow_5, gamma_4], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.ge]
# Source node to ATen node mapping:
# gamma => full_default_1
# gamma_3 => maximum_3
# gamma_4 => sub_3
# pow_5 => pow_5
# Graph fragment:
# %full_default_1 : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([192, 192], 3.814697265625e-06), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_3 : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_9, %full_default_1), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum_3, 2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_5, 1.4551915228366852e-11), kwargs = {})
# %pow_14 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum_3, 1.0), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_14, 2.0), kwargs = {})
# %ge_2 : [num_users=1] = call_function[target=torch.ops.aten.ge.Tensor](args = (%primals_9, %full_default_1), kwargs = {})
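# Note (added): one kernel launch yields three tensors: the clamped-and-squared
# gamma minus the pedestal (forward value), the factor 2*max(gamma_param, bound)
# for d(gamma)/d(param), and the mask gamma_param >= bound that gates gradients
# through the clamp in the backward pass.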
triton_poi_fused_ge_maximum_mul_pow_sub_8 = async_compile.triton('triton_poi_fused_ge_maximum_mul_pow_sub_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ge_maximum_mul_pow_sub_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_ge_maximum_mul_pow_sub_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 3.814697265625e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tmp6 = 2.0
tmp7 = tmp2 * tmp6
tmp8 = tmp0 >= tmp1
tl.store(out_ptr0 + (x0), tmp5, None)
tl.store(out_ptr1 + (x0), tmp7, None)
tl.store(out_ptr2 + (x0), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sk/cskr6dg7zebx6ayzi6uxfz77bly2f4mgpujcytypuxkeer3rje5a.py
# Topologically Sorted Source Nodes: [beta, beta_2, pow_4, beta_3, norm__2, norm__3, outputs_1], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution, aten.sqrt]
# Source node to ATen node mapping:
# beta => full_default
# beta_2 => maximum_2
# beta_3 => sub_2
# norm__2 => convolution_3
# norm__3 => sqrt_1
# outputs_1 => mul_5
# pow_4 => pow_4
# Graph fragment:
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([192], 0.0010000072652474046), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_2 : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_8, %full_default), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum_2, 2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_4, 1.4551915228366852e-11), kwargs = {})
# %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%pow_6, %view_1, %sub_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%convolution_3,), kwargs = {})
# %mul_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, %sqrt_1), kwargs = {})
triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_9 = async_compile.triton('triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_9(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 196608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x2), None)
tmp2 = 0.0010000072652474046
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp3 * tmp3
tmp5 = 1.4551915228366852e-11
tmp6 = tmp4 - tmp5
tmp7 = tmp0 + tmp6
tmp9 = libdevice.sqrt(tmp7)
tmp10 = tmp8 * tmp9
tl.store(in_out_ptr0 + (x2), tmp7, None)
tl.store(out_ptr0 + (x2), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/yk/cykmdry7fdyxl4owrvajfjc2fcgy6hagpeat3wnqwvv53rcr2v6c.py
# Topologically Sorted Source Nodes: [conv_transpose2d_2, pow_9], Original ATen: [aten.convolution, aten.pow]
# Source node to ATen node mapping:
# conv_transpose2d_2 => convolution_4
# pow_9 => pow_9
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_5, %primals_10, %primals_11, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {})
# %pow_9 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%convolution_4, 2), kwargs = {})
triton_poi_fused_convolution_pow_10 = async_compile.triton('triton_poi_fused_convolution_pow_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_pow_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_pow_10(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + (x2), tmp2, None)
tl.store(out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3v/c3vv5mwzilryp2l3jqqicod2tskfjauwm3mst66y37vpfqw7b5pc.py
# Topologically Sorted Source Nodes: [beta, beta_4, pow_7, beta_5, norm__4, norm__5, outputs_2], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution, aten.sqrt]
# Source node to ATen node mapping:
# beta => full_default
# beta_4 => maximum_4
# beta_5 => sub_4
# norm__4 => convolution_5
# norm__5 => sqrt_2
# outputs_2 => mul_8
# pow_7 => pow_7
# Graph fragment:
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([192], 0.0010000072652474046), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_4 : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_12, %full_default), kwargs = {})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum_4, 2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_7, 1.4551915228366852e-11), kwargs = {})
# %convolution_5 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%pow_9, %view_2, %sub_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%convolution_5,), kwargs = {})
# %mul_8 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, %sqrt_2), kwargs = {})
triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_11 = async_compile.triton('triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_11(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x2), None)
tmp2 = 0.0010000072652474046
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp3 * tmp3
tmp5 = 1.4551915228366852e-11
tmp6 = tmp4 - tmp5
tmp7 = tmp0 + tmp6
tmp9 = libdevice.sqrt(tmp7)
tmp10 = tmp8 * tmp9
tl.store(in_out_ptr0 + (x2), tmp7, None)
tl.store(out_ptr0 + (x2), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/fg/cfgrlsu4faeplr4s3vhd7hx4eh6myzadnc4cayzvcnm2n5blp5wk.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_8, %primals_14, %primals_15, [2, 2], [2, 2], [1, 1], True, [1, 1], 1), kwargs = {})
triton_poi_fused_convolution_12 = async_compile.triton('triton_poi_fused_convolution_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_12(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 3
y1 = (yindex // 3)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (3*x2) + (12288*y1)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp2, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qt/cqtikzj3dhv4hmlzfc4qpxybeeekea326e3s6dj4ojfaeizdpbed.py
# Topologically Sorted Source Nodes: [beta, beta_4], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.ge]
# Source node to ATen node mapping:
# beta => full_default
# beta_4 => maximum_4
# Graph fragment:
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([192], 0.0010000072652474046), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_4 : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_12, %full_default), kwargs = {})
# %pow_12 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum_4, 1.0), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_12, 2.0), kwargs = {})
# %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Tensor](args = (%primals_12, %full_default), kwargs = {})
triton_poi_fused_ge_maximum_mul_pow_13 = async_compile.triton('triton_poi_fused_ge_maximum_mul_pow_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ge_maximum_mul_pow_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_ge_maximum_mul_pow_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0010000072652474046
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 >= tmp1
tl.store(out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
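# Editor's note (hedged): the kernel above precomputes two backward helpers
# for a clamped 192-element parameter vector: 2.0 * max(p, bound) is the
# derivative of max(p, bound) ** 2 evaluated at the clamped value, and
# p >= bound is the pass-through mask that LowerBound.backward combines with
# grad_output < 0. Both are returned from call() so autograd can reuse them.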
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (320, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_2, (192, ), (1, ))
assert_size_stride(primals_3, (4, 320, 4, 4), (5120, 16, 4, 1))
assert_size_stride(primals_4, (192, ), (1, ))
assert_size_stride(primals_5, (192, 192), (192, 1))
assert_size_stride(primals_6, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_7, (192, ), (1, ))
assert_size_stride(primals_8, (192, ), (1, ))
assert_size_stride(primals_9, (192, 192), (192, 1))
assert_size_stride(primals_10, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_11, (192, ), (1, ))
assert_size_stride(primals_12, (192, ), (1, ))
assert_size_stride(primals_13, (192, 192), (192, 1))
assert_size_stride(primals_14, (192, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_15, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((320, 192, 5, 5), (4800, 1, 960, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 61440, 25, grid=grid(61440, 25), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 320, 4, 4), (5120, 1, 1280, 320), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 1280, 16, grid=grid(1280, 16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_6, buf2, 36864, 25, grid=grid(36864, 25), stream=stream0)
del primals_6
buf3 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_10, buf3, 36864, 25, grid=grid(36864, 25), stream=stream0)
del primals_10
buf4 = empty_strided_cuda((192, 3, 5, 5), (75, 1, 15, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_14, buf4, 576, 25, grid=grid(576, 25), stream=stream0)
del primals_14
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf5, (4, 192, 8, 8), (12288, 1, 1536, 192))
buf6 = buf5; del buf5 # reuse
buf8 = empty_strided_cuda((4, 192, 8, 8), (12288, 1, 1536, 192), torch.float32)
# Topologically Sorted Source Nodes: [conv_transpose2d, pow_3], Original ATen: [aten.convolution, aten.pow]
triton_poi_fused_convolution_pow_4.run(buf6, primals_2, buf8, 49152, grid=grid(49152), stream=stream0)
del primals_2
buf7 = empty_strided_cuda((192, 192), (192, 1), torch.float32)
# Topologically Sorted Source Nodes: [gamma, pow_2, gamma_1], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub]
triton_poi_fused_maximum_mul_pow_sub_5.run(primals_5, buf7, 36864, grid=grid(36864), stream=stream0)
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution]
buf9 = extern_kernels.convolution(buf8, reinterpret_tensor(buf7, (192, 192, 1, 1), (192, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 192, 8, 8), (12288, 1, 1536, 192))
buf10 = buf9; del buf9 # reuse
buf11 = empty_strided_cuda((4, 192, 8, 8), (12288, 1, 1536, 192), torch.float32)
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_, norm__1, outputs], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution, aten.sqrt]
triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_6.run(buf10, primals_4, buf6, buf11, 49152, grid=grid(49152), stream=stream0)
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, buf2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf12, (4, 192, 16, 16), (49152, 1, 3072, 192))
buf13 = buf12; del buf12 # reuse
buf15 = empty_strided_cuda((4, 192, 16, 16), (49152, 1, 3072, 192), torch.float32)
# Topologically Sorted Source Nodes: [conv_transpose2d_1, pow_6], Original ATen: [aten.convolution, aten.pow]
triton_poi_fused_convolution_pow_7.run(buf13, primals_7, buf15, 196608, grid=grid(196608), stream=stream0)
del primals_7
buf14 = empty_strided_cuda((192, 192), (192, 1), torch.float32)
buf32 = empty_strided_cuda((192, 192), (192, 1), torch.float32)
buf33 = empty_strided_cuda((192, 192), (192, 1), torch.bool)
# Topologically Sorted Source Nodes: [gamma, gamma_3, pow_5, gamma_4], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.ge]
triton_poi_fused_ge_maximum_mul_pow_sub_8.run(primals_9, buf14, buf32, buf33, 36864, grid=grid(36864), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [beta, beta_2, pow_4, beta_3, norm__2], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution]
buf16 = extern_kernels.convolution(buf15, reinterpret_tensor(buf14, (192, 192, 1, 1), (192, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 192, 16, 16), (49152, 1, 3072, 192))
buf17 = buf16; del buf16 # reuse
buf18 = empty_strided_cuda((4, 192, 16, 16), (49152, 1, 3072, 192), torch.float32)
# Topologically Sorted Source Nodes: [beta, beta_2, pow_4, beta_3, norm__2, norm__3, outputs_1], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution, aten.sqrt]
triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_9.run(buf17, primals_8, buf13, buf18, 196608, grid=grid(196608), stream=stream0)
# Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf18, buf3, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf19, (4, 192, 32, 32), (196608, 1, 6144, 192))
buf20 = buf19; del buf19 # reuse
buf22 = empty_strided_cuda((4, 192, 32, 32), (196608, 1, 6144, 192), torch.float32)
# Topologically Sorted Source Nodes: [conv_transpose2d_2, pow_9], Original ATen: [aten.convolution, aten.pow]
triton_poi_fused_convolution_pow_10.run(buf20, primals_11, buf22, 786432, grid=grid(786432), stream=stream0)
del primals_11
buf21 = empty_strided_cuda((192, 192), (192, 1), torch.float32)
buf28 = empty_strided_cuda((192, 192), (192, 1), torch.float32)
buf29 = empty_strided_cuda((192, 192), (192, 1), torch.bool)
# Topologically Sorted Source Nodes: [gamma, gamma_6, pow_8, gamma_7], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.ge]
triton_poi_fused_ge_maximum_mul_pow_sub_8.run(primals_13, buf21, buf28, buf29, 36864, grid=grid(36864), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [beta, beta_4, pow_7, beta_5, norm__4], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution]
buf23 = extern_kernels.convolution(buf22, reinterpret_tensor(buf21, (192, 192, 1, 1), (192, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 192, 32, 32), (196608, 1, 6144, 192))
buf24 = buf23; del buf23 # reuse
buf25 = empty_strided_cuda((4, 192, 32, 32), (196608, 1, 6144, 192), torch.float32)
# Topologically Sorted Source Nodes: [beta, beta_4, pow_7, beta_5, norm__4, norm__5, outputs_2], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution, aten.sqrt]
triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_11.run(buf24, primals_12, buf20, buf25, 786432, grid=grid(786432), stream=stream0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, buf4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf26, (4, 3, 64, 64), (12288, 1, 192, 3))
buf27 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
triton_poi_fused_convolution_12.run(buf26, primals_15, buf27, 12, 4096, grid=grid(12, 4096), stream=stream0)
del buf26
del primals_15
buf30 = empty_strided_cuda((192, ), (1, ), torch.float32)
buf31 = empty_strided_cuda((192, ), (1, ), torch.bool)
# Topologically Sorted Source Nodes: [beta, beta_4], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.ge]
triton_poi_fused_ge_maximum_mul_pow_13.run(primals_12, buf30, buf31, 192, grid=grid(192), stream=stream0)
del primals_12
buf34 = empty_strided_cuda((192, ), (1, ), torch.float32)
buf35 = empty_strided_cuda((192, ), (1, ), torch.bool)
# Topologically Sorted Source Nodes: [beta, beta_2], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.ge]
triton_poi_fused_ge_maximum_mul_pow_13.run(primals_8, buf34, buf35, 192, grid=grid(192), stream=stream0)
del primals_8
return (buf27, buf0, buf1, primals_4, primals_5, buf2, buf3, buf4, buf6, reinterpret_tensor(buf7, (192, 192, 1, 1), (192, 1, 1, 1), 0), buf8, buf10, buf11, buf13, reinterpret_tensor(buf14, (192, 192, 1, 1), (192, 1, 1, 1), 0), buf15, buf17, buf18, buf20, reinterpret_tensor(buf21, (192, 192, 1, 1), (192, 1, 1, 1), 0), buf22, buf24, buf25, buf28, buf29, buf30, buf31, buf32, buf33, buf34, buf35, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((320, 192, 5, 5), (4800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 320, 4, 4), (5120, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((192, 192), (192, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((192, 192, 5, 5), (4800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((192, 192), (192, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((192, 192, 5, 5), (4800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((192, 192), (192, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((192, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import math
import torch
import torch.nn as nn
import torch.utils.data
class LowerBound(Function):
@staticmethod
def forward(ctx, inputs, bound):
b = torch.ones_like(inputs) * bound
ctx.save_for_backward(inputs, b)
return torch.max(inputs, b)
@staticmethod
def backward(ctx, grad_output):
inputs, b = ctx.saved_tensors
pass_through_1 = inputs >= b
pass_through_2 = grad_output < 0
pass_through = pass_through_1 | pass_through_2
return pass_through.type(grad_output.dtype) * grad_output, None
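# Editor's note: the backward rule above passes gradients when the input sits
# at or above the bound, or when the gradient would move the parameter upward
# under gradient descent (grad_output < 0), so clamped parameters are not
# permanently stuck at the bound. A hedged sanity check (never called by the
# generated code):
def _lowerbound_gradient_demo():
    x = torch.tensor([0.5, 2.0], requires_grad=True)
    y = LowerBound.apply(x, 1.0)  # forward clamps to [1.0, 2.0]
    y.sum().backward()
    return x.grad  # tensor([0., 1.]): only the unclamped entry gets gradient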
class GDN(nn.Module):
"""Generalized divisive normalization layer.
    y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
    (with inverse=True the layer multiplies by this norm instead of dividing)
"""
def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1,
reparam_offset=2 ** -18):
super(GDN, self).__init__()
self.inverse = inverse
self.beta_min = beta_min
self.gamma_init = gamma_init
self.reparam_offset = reparam_offset
self.build(ch)
def build(self, ch):
self.pedestal = self.reparam_offset ** 2
self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5
self.gamma_bound = self.reparam_offset
beta = torch.sqrt(torch.ones(ch) + self.pedestal)
self.beta = nn.Parameter(beta)
eye = torch.eye(ch)
g = self.gamma_init * eye
g = g + self.pedestal
gamma = torch.sqrt(g)
self.gamma = nn.Parameter(gamma)
self.pedestal = self.pedestal
def forward(self, inputs):
unfold = False
if inputs.dim() == 5:
unfold = True
bs, ch, d, w, h = inputs.size()
inputs = inputs.view(bs, ch, d * w, h)
_, ch, _, _ = inputs.size()
beta = LowerBound.apply(self.beta, self.beta_bound)
beta = beta ** 2 - self.pedestal
gamma = LowerBound.apply(self.gamma, self.gamma_bound)
gamma = gamma ** 2 - self.pedestal
gamma = gamma.view(ch, ch, 1, 1)
norm_ = nn.functional.conv2d(inputs ** 2, gamma, beta)
norm_ = torch.sqrt(norm_)
if self.inverse:
outputs = inputs * norm_
else:
outputs = inputs / norm_
if unfold:
outputs = outputs.view(bs, ch, d, w, h)
return outputs
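# Editor's note: a hedged usage sketch of the inverse mode that the decoder
# below relies on (illustrative only, never called by the generated code):
def _igdn_usage_demo():
    igdn = GDN(192, inverse=True)
    x = torch.randn(4, 192, 8, 8)
    # inverse GDN multiplies by sqrt(beta + conv(gamma, x ** 2)) rather than
    # dividing, undoing the normalization applied by the analysis transform
    return igdn(x)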
class Synthesis_net(nn.Module):
"""
    Decoder (synthesis transform).
"""
def __init__(self, out_channel_N=192, out_channel_M=320):
super(Synthesis_net, self).__init__()
self.deconv1 = nn.ConvTranspose2d(out_channel_M, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv1.weight.data, math.sqrt(2 *
1 * (out_channel_M + out_channel_N) / (out_channel_M +
out_channel_M)))
torch.nn.init.constant_(self.deconv1.bias.data, 0.01)
self.igdn1 = GDN(out_channel_N, inverse=True)
self.deconv2 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv2.weight.data, math.sqrt(2 * 1)
)
torch.nn.init.constant_(self.deconv2.bias.data, 0.01)
self.igdn2 = GDN(out_channel_N, inverse=True)
self.deconv3 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv3.weight.data, math.sqrt(2 * 1)
)
torch.nn.init.constant_(self.deconv3.bias.data, 0.01)
self.igdn3 = GDN(out_channel_N, inverse=True)
self.deconv4 = nn.ConvTranspose2d(out_channel_N, 3, 5, stride=2,
padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv4.weight.data, math.sqrt(2 *
1 * (out_channel_N + 3) / (out_channel_N + out_channel_N)))
torch.nn.init.constant_(self.deconv4.bias.data, 0.01)
def forward(self, x):
x = self.igdn1(self.deconv1(x))
x = self.igdn2(self.deconv2(x))
x = self.igdn3(self.deconv3(x))
x = self.deconv4(x)
return x
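# Editor's note: every deconvolution above uses kernel 5, stride 2, padding 2
# and output_padding 1, so each stage exactly doubles the spatial size:
# (H - 1) * 2 - 2 * 2 + 5 + 1 = 2 * H. For the 4x320x4x4 test input this gives
# 4 -> 8 -> 16 -> 32 -> 64, matching the shapes asserted on buf5, buf12, buf19
# and buf26 in call().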
def get_inputs():
return [torch.rand([4, 320, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 192 * x2 + 4800 * y1), tmp0, xmask)
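# Editor's note (hedged): triton_poi_fused_0 through triton_poi_fused_3 are
# pure layout copies. They repack the NCHW weight and input tensors into a
# channels-last-style arrangement (channel stride 1, e.g. strides
# (4800, 1, 960, 192)) that the extern_kernels.convolution calls below consume
# directly.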
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 1280
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = yindex // 320
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 320 * x2 + 5120 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 192 * x2 + 4800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 576
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 75 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_pow_4(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_maximum_mul_pow_sub_5(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 3.814697265625e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + x0, tmp5, None)
@triton.jit
def triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_6(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x2, None)
tmp2 = 0.0010000072652474046
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp3 * tmp3
tmp5 = 1.4551915228366852e-11
tmp6 = tmp4 - tmp5
tmp7 = tmp0 + tmp6
tmp9 = libdevice.sqrt(tmp7)
tmp10 = tmp8 * tmp9
tl.store(in_out_ptr0 + x2, tmp7, None)
tl.store(out_ptr0 + x2, tmp10, None)
@triton.jit
def triton_poi_fused_convolution_pow_7(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_ge_maximum_mul_pow_sub_8(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 3.814697265625e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tmp6 = 2.0
tmp7 = tmp2 * tmp6
tmp8 = tmp0 >= tmp1
tl.store(out_ptr0 + x0, tmp5, None)
tl.store(out_ptr1 + x0, tmp7, None)
tl.store(out_ptr2 + x0, tmp8, None)
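# Editor's note (hedged): one pass over the raw 192x192 gamma matrix yields
# three tensors here: the reparameterized weight max(g, 2 ** -18) ** 2 minus
# the pedestal, consumed by the forward 1x1 convolution, plus the derivative
# factor 2.0 * max(g, 2 ** -18) and the mask g >= 2 ** -18 that are saved for
# LowerBound.backward.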
@triton.jit
def triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_9(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x2, None)
tmp2 = 0.0010000072652474046
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp3 * tmp3
tmp5 = 1.4551915228366852e-11
tmp6 = tmp4 - tmp5
tmp7 = tmp0 + tmp6
tmp9 = libdevice.sqrt(tmp7)
tmp10 = tmp8 * tmp9
tl.store(in_out_ptr0 + x2, tmp7, None)
tl.store(out_ptr0 + x2, tmp10, None)
@triton.jit
def triton_poi_fused_convolution_pow_10(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + x2, tmp2, None)
tl.store(out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_11(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x2, None)
tmp2 = 0.0010000072652474046
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp3 * tmp3
tmp5 = 1.4551915228366852e-11
tmp6 = tmp4 - tmp5
tmp7 = tmp0 + tmp6
tmp9 = libdevice.sqrt(tmp7)
tmp10 = tmp8 * tmp9
tl.store(in_out_ptr0 + x2, tmp7, None)
tl.store(out_ptr0 + x2, tmp10, None)
@triton.jit
def triton_poi_fused_convolution_12(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12288 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)
@triton.jit
def triton_poi_fused_ge_maximum_mul_pow_13(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0010000072652474046
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 >= tmp1
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (320, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_2, (192,), (1,))
assert_size_stride(primals_3, (4, 320, 4, 4), (5120, 16, 4, 1))
assert_size_stride(primals_4, (192,), (1,))
assert_size_stride(primals_5, (192, 192), (192, 1))
assert_size_stride(primals_6, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_7, (192,), (1,))
assert_size_stride(primals_8, (192,), (1,))
assert_size_stride(primals_9, (192, 192), (192, 1))
assert_size_stride(primals_10, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_11, (192,), (1,))
assert_size_stride(primals_12, (192,), (1,))
assert_size_stride(primals_13, (192, 192), (192, 1))
assert_size_stride(primals_14, (192, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_15, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((320, 192, 5, 5), (4800, 1, 960, 192),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(61440, 25)](primals_1, buf0, 61440, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 320, 4, 4), (5120, 1, 1280, 320),
torch.float32)
triton_poi_fused_1[grid(1280, 16)](primals_3, buf1, 1280, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192),
torch.float32)
triton_poi_fused_2[grid(36864, 25)](primals_6, buf2, 36864, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf3 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192),
torch.float32)
triton_poi_fused_2[grid(36864, 25)](primals_10, buf3, 36864, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_10
buf4 = empty_strided_cuda((192, 3, 5, 5), (75, 1, 15, 3), torch.float32
)
triton_poi_fused_3[grid(576, 25)](primals_14, buf4, 576, 25, XBLOCK
=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_14
buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf5, (4, 192, 8, 8), (12288, 1, 1536, 192))
buf6 = buf5
del buf5
buf8 = empty_strided_cuda((4, 192, 8, 8), (12288, 1, 1536, 192),
torch.float32)
triton_poi_fused_convolution_pow_4[grid(49152)](buf6, primals_2,
buf8, 49152, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf7 = empty_strided_cuda((192, 192), (192, 1), torch.float32)
triton_poi_fused_maximum_mul_pow_sub_5[grid(36864)](primals_5, buf7,
36864, XBLOCK=512, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf8, reinterpret_tensor(buf7, (
192, 192, 1, 1), (192, 1, 0, 0), 0), stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf9, (4, 192, 8, 8), (12288, 1, 1536, 192))
buf10 = buf9
del buf9
buf11 = empty_strided_cuda((4, 192, 8, 8), (12288, 1, 1536, 192),
torch.float32)
triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_6[grid(49152)](
buf10, primals_4, buf6, buf11, 49152, XBLOCK=512, num_warps=4,
num_stages=1)
buf12 = extern_kernels.convolution(buf11, buf2, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf12, (4, 192, 16, 16), (49152, 1, 3072, 192))
buf13 = buf12
del buf12
buf15 = empty_strided_cuda((4, 192, 16, 16), (49152, 1, 3072, 192),
torch.float32)
triton_poi_fused_convolution_pow_7[grid(196608)](buf13, primals_7,
buf15, 196608, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf14 = empty_strided_cuda((192, 192), (192, 1), torch.float32)
buf32 = empty_strided_cuda((192, 192), (192, 1), torch.float32)
buf33 = empty_strided_cuda((192, 192), (192, 1), torch.bool)
triton_poi_fused_ge_maximum_mul_pow_sub_8[grid(36864)](primals_9,
buf14, buf32, buf33, 36864, XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
buf16 = extern_kernels.convolution(buf15, reinterpret_tensor(buf14,
(192, 192, 1, 1), (192, 1, 0, 0), 0), stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf16, (4, 192, 16, 16), (49152, 1, 3072, 192))
buf17 = buf16
del buf16
buf18 = empty_strided_cuda((4, 192, 16, 16), (49152, 1, 3072, 192),
torch.float32)
triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_9[grid(196608)](
buf17, primals_8, buf13, buf18, 196608, XBLOCK=512, num_warps=8,
num_stages=1)
buf19 = extern_kernels.convolution(buf18, buf3, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf19, (4, 192, 32, 32), (196608, 1, 6144, 192))
buf20 = buf19
del buf19
buf22 = empty_strided_cuda((4, 192, 32, 32), (196608, 1, 6144, 192),
torch.float32)
triton_poi_fused_convolution_pow_10[grid(786432)](buf20, primals_11,
buf22, 786432, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf21 = empty_strided_cuda((192, 192), (192, 1), torch.float32)
buf28 = empty_strided_cuda((192, 192), (192, 1), torch.float32)
buf29 = empty_strided_cuda((192, 192), (192, 1), torch.bool)
triton_poi_fused_ge_maximum_mul_pow_sub_8[grid(36864)](primals_13,
buf21, buf28, buf29, 36864, XBLOCK=512, num_warps=4, num_stages=1)
del primals_13
buf23 = extern_kernels.convolution(buf22, reinterpret_tensor(buf21,
(192, 192, 1, 1), (192, 1, 0, 0), 0), stride=(1, 1), padding=(0,
0), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf23, (4, 192, 32, 32), (196608, 1, 6144, 192))
buf24 = buf23
del buf23
buf25 = empty_strided_cuda((4, 192, 32, 32), (196608, 1, 6144, 192),
torch.float32)
triton_poi_fused_convolution_maximum_mul_pow_sqrt_sub_11[grid(786432)](
buf24, primals_12, buf20, buf25, 786432, XBLOCK=512, num_warps=
8, num_stages=1)
buf26 = extern_kernels.convolution(buf25, buf4, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf26, (4, 3, 64, 64), (12288, 1, 192, 3))
buf27 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_12[grid(12, 4096)](buf26, primals_15,
buf27, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del buf26
del primals_15
buf30 = empty_strided_cuda((192,), (1,), torch.float32)
buf31 = empty_strided_cuda((192,), (1,), torch.bool)
triton_poi_fused_ge_maximum_mul_pow_13[grid(192)](primals_12, buf30,
buf31, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_12
buf34 = empty_strided_cuda((192,), (1,), torch.float32)
buf35 = empty_strided_cuda((192,), (1,), torch.bool)
triton_poi_fused_ge_maximum_mul_pow_13[grid(192)](primals_8, buf34,
buf35, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
return (buf27, buf0, buf1, primals_4, primals_5, buf2, buf3, buf4, buf6,
reinterpret_tensor(buf7, (192, 192, 1, 1), (192, 1, 1, 1), 0), buf8,
buf10, buf11, buf13, reinterpret_tensor(buf14, (192, 192, 1, 1), (
192, 1, 1, 1), 0), buf15, buf17, buf18, buf20, reinterpret_tensor(
buf21, (192, 192, 1, 1), (192, 1, 1, 1), 0), buf22, buf24, buf25,
buf28, buf29, buf30, buf31, buf32, buf33, buf34, buf35)
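# Editor's note: call() returns the reconstructed 4x3x64x64 image (buf27)
# first; the remaining tuple entries are intermediates kept for the backward
# graph, including the derivative factors and clamp masks in buf28 through
# buf35 produced by the ge_maximum kernels above.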
class LowerBound(Function):
@staticmethod
def forward(ctx, inputs, bound):
b = torch.ones_like(inputs) * bound
ctx.save_for_backward(inputs, b)
return torch.max(inputs, b)
@staticmethod
def backward(ctx, grad_output):
inputs, b = ctx.saved_tensors
pass_through_1 = inputs >= b
pass_through_2 = grad_output < 0
pass_through = pass_through_1 | pass_through_2
return pass_through.type(grad_output.dtype) * grad_output, None
class GDN(nn.Module):
"""Generalized divisive normalization layer.
    y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
    (with inverse=True the layer multiplies by this norm instead of dividing)
"""
def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1,
reparam_offset=2 ** -18):
super(GDN, self).__init__()
self.inverse = inverse
self.beta_min = beta_min
self.gamma_init = gamma_init
self.reparam_offset = reparam_offset
self.build(ch)
def build(self, ch):
self.pedestal = self.reparam_offset ** 2
self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5
self.gamma_bound = self.reparam_offset
beta = torch.sqrt(torch.ones(ch) + self.pedestal)
self.beta = nn.Parameter(beta)
eye = torch.eye(ch)
g = self.gamma_init * eye
g = g + self.pedestal
gamma = torch.sqrt(g)
self.gamma = nn.Parameter(gamma)
self.pedestal = self.pedestal
def forward(self, inputs):
unfold = False
if inputs.dim() == 5:
unfold = True
bs, ch, d, w, h = inputs.size()
inputs = inputs.view(bs, ch, d * w, h)
_, ch, _, _ = inputs.size()
beta = LowerBound.apply(self.beta, self.beta_bound)
beta = beta ** 2 - self.pedestal
gamma = LowerBound.apply(self.gamma, self.gamma_bound)
gamma = gamma ** 2 - self.pedestal
gamma = gamma.view(ch, ch, 1, 1)
norm_ = nn.functional.conv2d(inputs ** 2, gamma, beta)
norm_ = torch.sqrt(norm_)
if self.inverse:
outputs = inputs * norm_
else:
outputs = inputs / norm_
if unfold:
outputs = outputs.view(bs, ch, d, w, h)
return outputs
class Synthesis_netNew(nn.Module):
"""
    Decoder (synthesis transform).
"""
def __init__(self, out_channel_N=192, out_channel_M=320):
super(Synthesis_netNew, self).__init__()
self.deconv1 = nn.ConvTranspose2d(out_channel_M, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv1.weight.data, math.sqrt(2 *
1 * (out_channel_M + out_channel_N) / (out_channel_M +
out_channel_M)))
torch.nn.init.constant_(self.deconv1.bias.data, 0.01)
self.igdn1 = GDN(out_channel_N, inverse=True)
self.deconv2 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv2.weight.data, math.sqrt(2 * 1)
)
torch.nn.init.constant_(self.deconv2.bias.data, 0.01)
self.igdn2 = GDN(out_channel_N, inverse=True)
self.deconv3 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv3.weight.data, math.sqrt(2 * 1)
)
torch.nn.init.constant_(self.deconv3.bias.data, 0.01)
self.igdn3 = GDN(out_channel_N, inverse=True)
self.deconv4 = nn.ConvTranspose2d(out_channel_N, 3, 5, stride=2,
padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv4.weight.data, math.sqrt(2 *
1 * (out_channel_N + 3) / (out_channel_N + out_channel_N)))
torch.nn.init.constant_(self.deconv4.bias.data, 0.01)
def forward(self, input_0):
primals_1 = self.deconv1.weight
primals_2 = self.deconv1.bias
primals_4 = self.igdn1.beta
primals_5 = self.igdn1.gamma
primals_6 = self.deconv2.weight
primals_7 = self.deconv2.bias
primals_8 = self.igdn2.beta
primals_9 = self.igdn2.gamma
primals_10 = self.deconv3.weight
primals_11 = self.deconv3.bias
primals_12 = self.igdn3.beta
primals_13 = self.igdn3.gamma
primals_14 = self.deconv4.weight
primals_15 = self.deconv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
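# Editor's note: Synthesis_netNew keeps the original module's parameters and
# initialization but swaps forward() for the compiled call(); only output[0],
# the image tensor, is surfaced, and the remaining outputs exist for autograd.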
| wemozj/Image-Compression-based-GMM-and-Attention-Module | Synthesis_net | false | 4,568 | [
"Apache-2.0"
] | 0 | 93f804dbcea8ffc1621456f3d104d0342c75373b | https://github.com/wemozj/Image-Compression-based-GMM-and-Attention-Module/tree/93f804dbcea8ffc1621456f3d104d0342c75373b | from torch.autograd import Function
import math
import torch
import torch.nn as nn
import torch.utils.data
class LowerBound(Function):
@staticmethod
def forward(ctx, inputs, bound):
b = torch.ones_like(inputs) * bound
ctx.save_for_backward(inputs, b)
return torch.max(inputs, b)
@staticmethod
def backward(ctx, grad_output):
inputs, b = ctx.saved_tensors
pass_through_1 = inputs >= b
pass_through_2 = grad_output < 0
pass_through = pass_through_1 | pass_through_2
return pass_through.type(grad_output.dtype) * grad_output, None
class GDN(nn.Module):
"""Generalized divisive normalization layer.
    y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
    (with inverse=True the layer multiplies by this norm instead of dividing)
"""
def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1,
reparam_offset=2 ** -18):
super().__init__()
self.inverse = inverse
self.beta_min = beta_min
self.gamma_init = gamma_init
self.reparam_offset = reparam_offset
self.build(ch)
def build(self, ch):
self.pedestal = self.reparam_offset ** 2
self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5
self.gamma_bound = self.reparam_offset
beta = torch.sqrt(torch.ones(ch) + self.pedestal)
self.beta = nn.Parameter(beta)
eye = torch.eye(ch)
g = self.gamma_init * eye
g = g + self.pedestal
gamma = torch.sqrt(g)
self.gamma = nn.Parameter(gamma)
self.pedestal = self.pedestal
def forward(self, inputs):
unfold = False
if inputs.dim() == 5:
unfold = True
bs, ch, d, w, h = inputs.size()
inputs = inputs.view(bs, ch, d * w, h)
_, ch, _, _ = inputs.size()
beta = LowerBound.apply(self.beta, self.beta_bound)
beta = beta ** 2 - self.pedestal
gamma = LowerBound.apply(self.gamma, self.gamma_bound)
gamma = gamma ** 2 - self.pedestal
gamma = gamma.view(ch, ch, 1, 1)
norm_ = nn.functional.conv2d(inputs ** 2, gamma, beta)
norm_ = torch.sqrt(norm_)
if self.inverse:
outputs = inputs * norm_
else:
outputs = inputs / norm_
if unfold:
outputs = outputs.view(bs, ch, d, w, h)
return outputs
class Model(nn.Module):
"""
    Decoder (synthesis transform).
"""
def __init__(self, out_channel_N=192, out_channel_M=320):
super().__init__()
self.deconv1 = nn.ConvTranspose2d(out_channel_M, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv1.weight.data, math.sqrt(2 *
1 * (out_channel_M + out_channel_N) / (out_channel_M +
out_channel_M)))
torch.nn.init.constant_(self.deconv1.bias.data, 0.01)
self.igdn1 = GDN(out_channel_N, inverse=True)
self.deconv2 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv2.weight.data, math.sqrt(2 * 1)
)
torch.nn.init.constant_(self.deconv2.bias.data, 0.01)
self.igdn2 = GDN(out_channel_N, inverse=True)
self.deconv3 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5,
stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv3.weight.data, math.sqrt(2 * 1)
)
torch.nn.init.constant_(self.deconv3.bias.data, 0.01)
self.igdn3 = GDN(out_channel_N, inverse=True)
self.deconv4 = nn.ConvTranspose2d(out_channel_N, 3, 5, stride=2,
padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv4.weight.data, math.sqrt(2 *
1 * (out_channel_N + 3) / (out_channel_N + out_channel_N)))
torch.nn.init.constant_(self.deconv4.bias.data, 0.01)
def forward(self, x):
x = self.igdn1(self.deconv1(x))
x = self.igd
# ... truncated (>4000 chars) for memory efficiency |
BertSelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/iz/ciztqj6kop3hxov46yrmzprkzfir3eljcic4mkqznz2j5cfeaudr.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float("-inf")
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = (tmp29 != 0)
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = (tmp33 != 0)
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = (tmp38 != 0)
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = (tmp43 != 0)
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + (x2), tmp14, xmask)
tl.store(out_ptr1 + (x2), tmp25, xmask)
tl.store(out_ptr2 + (x2), tmp45, xmask)
''', device_str='cuda')
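# Hedged reference sketch (added for clarity; not emitted by Inductor): an
# eager-mode equivalent of triton_poi_fused_1 above. The kernel fuses three
# per-row reductions over `scores + mask`: the row max, the sum of
# exp(x - max), and a flag marking rows with at least one entry different
# from -inf. Tensor names here are illustrative assumptions.
def _softmax_stats_reference(scores, mask):
    s = scores + mask
    row_max = s.amax(dim=-1, keepdim=True)                      # out_ptr0
    row_sumexp = (s - row_max).exp().sum(dim=-1, keepdim=True)  # out_ptr1
    row_valid = (s != float('-inf')).any(dim=-1, keepdim=True)  # out_ptr2
    return row_max, row_sumexp, row_valid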
# kernel path: runs/run_shard_7/inductor_cache/x5/cx5uvbfethxuwwkwxf3xaualzhlcwqsz4jxqpbhintggaypzjwqf.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + (x4), xmask)
tmp3 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
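# Hedged continuation sketch (added): triton_poi_fused_2 consumes the
# statistics above and materializes the masked softmax in place, writing 0
# into rows whose entries were all -inf instead of letting
# exp(-inf - (-inf)) propagate NaNs. Eager equivalent, assuming torch is
# imported at the top of this generated module:
def _masked_softmax_reference(scores, mask):
    s = scores + mask
    row_max = s.amax(dim=-1, keepdim=True)
    probs = (s - row_max).exp()
    probs = probs / probs.sum(dim=-1, keepdim=True)
    valid = (s != float('-inf')).any(dim=-1, keepdim=True)
    return torch.where(valid, probs, torch.zeros_like(probs))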
# kernel path: runs/run_shard_7/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf2, primals_7, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf0, primals_3, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, primals_8, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf9, buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf1, primals_5, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
del buf11
return (reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transform(self, x, linear_layer):
bs, seq_len = x.shape[:2]
proj = linear_layer(x)
proj = proj.view(bs, seq_len, self.num_attention_heads, self.
attention_head_size)
proj = proj.transpose(1, 2)
return proj
def attention(self, key, query, value, attention_mask):
S = torch.matmul(query, key.transpose(-1, -2)
) / self.attention_head_size ** (1 / 2)
if attention_mask is not None:
S += attention_mask
S_p = F.softmax(S, dim=-1)
S_p = self.dropout(S_p)
context_layer = torch.matmul(S_p, value)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
return context_layer
def forward(self, hidden_states, attention_mask):
"""
hidden_states: [bs, seq_len, hidden_state]
attention_mask: [bs, 1, 1, seq_len]
output: [bs, seq_len, hidden_state]
"""
key_layer = self.transform(hidden_states, self.key)
value_layer = self.transform(hidden_states, self.value)
query_layer = self.transform(hidden_states, self.query)
attn_value = self.attention(key_layer, query_layer, value_layer,
attention_mask)
return attn_value
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(num_attention_heads=4, hidden_size=
4, attention_probs_dropout_prob=0.5)}]
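# Hedged usage sketch (added for illustration): exercising the eager module
# end to end with the same configuration and shapes as get_init_inputs()
# and get_inputs() above.
def example_forward():
    config = _mock_config(num_attention_heads=4, hidden_size=4,
        attention_probs_dropout_prob=0.5)
    layer = BertSelfAttention(config)
    hidden_states = torch.rand(4, 4, 4)    # [bs, seq_len, hidden_state]
    attention_mask = torch.zeros(4, 4, 4)  # additive mask; zeros keep all
    return layer(hidden_states, attention_mask)  # [bs, seq_len, hidden_state]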
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float('-inf')
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = tmp29 != 0
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = tmp33 != 0
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = tmp38 != 0
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = tmp43 != 0
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
tl.store(out_ptr2 + x2, tmp45, xmask)
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_out_ptr0 + x4, xmask)
tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf2, primals_7, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf4 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf2
triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_3[grid(16, 4)](buf1, primals_5, buf10, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf11 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf11
return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
class BertSelfAttentionNew(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transform(self, x, linear_layer):
bs, seq_len = x.shape[:2]
proj = linear_layer(x)
proj = proj.view(bs, seq_len, self.num_attention_heads, self.
attention_head_size)
proj = proj.transpose(1, 2)
return proj
def attention(self, key, query, value, attention_mask):
S = torch.matmul(query, key.transpose(-1, -2)
) / self.attention_head_size ** (1 / 2)
if attention_mask is not None:
S += attention_mask
S_p = F.softmax(S, dim=-1)
S_p = self.dropout(S_p)
context_layer = torch.matmul(S_p, value)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
return context_layer
def forward(self, input_0, input_1):
primals_2 = self.query.weight
primals_3 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| SamarthMM/cs769-assignments | BertSelfAttention | false | 4,569 | [
"MIT"
] | 0 | bac2ad57c50043608276df8e0f21181ef62696c7 | https://github.com/SamarthMM/cs769-assignments/tree/bac2ad57c50043608276df8e0f21181ef62696c7 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transform(self, x, linear_layer):
bs, seq_len = x.shape[:2]
proj = linear_layer(x)
proj = proj.view(bs, seq_len, self.num_attention_heads, self.
attention_head_size)
proj = proj.transpose(1, 2)
return proj
def attention(self, key, query, value, attention_mask):
S = torch.matmul(query, key.transpose(-1, -2)
) / self.attention_head_size ** (1 / 2)
if attention_mask is not None:
S += attention_mask
S_p = F.softmax(S, dim=-1)
S_p = self.dropout(S_p)
context_layer = torch.matmul(S_p, value)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
return context_layer
def forward(self, hidden_states, attention_mask):
"""
hidden_states: [bs, seq_len, hidden_state]
attention_mask: [bs, 1, 1, seq_len]
output: [bs, seq_len, hidden_state]
"""
key_layer = self.transform(hidden_states, self.key)
value_layer = self.transform(hidden_states, self.value)
query_layer = self.transform(hidden_states, self.query)
attn_value = self.attention(key_layer, query_layer, value_layer,
attention_mask)
return attn_value
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(num_attention_heads=4, hidden_size=
4, attention_probs_dropout_prob=0.5)}]
|
SFU | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py
# Topologically Sorted Source Nodes: [m], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# m => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
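# Hedged reference sketch (added): triton_poi_fused_cat_0 realizes
# m = torch.cat((input, fusions), dim=-1) by branching on the column index
# x0 (columns 0-3 read in_ptr0, columns 4-7 read in_ptr1). Eager
# equivalent, assuming torch is imported at the top of this module:
def _cat_reference(inp, fusions):
    return torch.cat((inp, fusions), dim=-1)  # (4,4,4,4) x2 -> (4,4,4,8)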
# kernel path: runs/run_shard_7/inductor_cache/yh/cyhkfzhsf4tit5utgl7ktic6f326pw35wtjfhpxsiyik7twarblq.py
# Topologically Sorted Source Nodes: [r, g, mul, sub, mul_1, o], Original ATen: [aten.tanh, aten.sigmoid, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# g => sigmoid
# mul => mul
# mul_1 => mul_1
# o => add
# r => tanh
# sub => sub
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
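# Hedged reference sketch (added): the fused kernel above evaluates the SFU
# gate o = g * tanh(r_logits) + (1 - g) * input with g = sigmoid(g_logits),
# where r_logits and g_logits are the two addmm outputs produced in call().
# Eager equivalent, assuming torch is imported at the top of this module:
def _sfu_gate_reference(g_logits, r_logits, inp):
    g = torch.sigmoid(g_logits)                         # tmp1
    return g * torch.tanh(r_logits) + (1.0 - g) * inp   # tmp9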
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [m], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [r, g, mul, sub, mul_1, o], Original ATen: [aten.tanh, aten.sigmoid, aten.mul, aten.rsub, aten.add]
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1.run(buf2, buf1, primals_1, buf3, 256, grid=grid(256), stream=stream0)
return (buf3, primals_1, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn.functional as F
class SFU(torch.nn.Module):
"""
    Only two inputs: one input vector and one fusion vector.
    Args:
        - input_size: size of the input vector
        - fusions_size: size of the fusion vector
Inputs:
- input: (seq_len, batch, input_size)
- fusions: (seq_len, batch, fusions_size)
Outputs:
- output: (seq_len, batch, input_size)
"""
def __init__(self, input_size, fusions_size):
super(SFU, self).__init__()
self.linear_r = torch.nn.Linear(input_size + fusions_size, input_size)
self.linear_g = torch.nn.Linear(input_size + fusions_size, input_size)
def forward(self, input, fusions):
m = torch.cat((input, fusions), dim=-1)
r = F.tanh(self.linear_r(m))
g = F.sigmoid(self.linear_g(m))
o = g * r + (1 - g) * input
return o
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'fusions_size': 4}]
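# Hedged usage sketch (added for illustration): shapes follow get_inputs()
# and get_init_inputs() above.
def example_sfu():
    sfu = SFU(input_size=4, fusions_size=4)
    x = torch.rand(4, 4, 4, 4)
    fusions = torch.rand(4, 4, 4, 4)
    return sfu(x, fusions)  # same shape as x: gated blend of tanh(r) and x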
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(256)](buf2, buf1,
primals_1, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf3, primals_1, reinterpret_tensor(buf0, (64, 8), (8, 1), 0
), buf1, buf2
class SFUNew(torch.nn.Module):
"""
    Only two inputs: one input vector and one fusion vector.
    Args:
        - input_size: size of the input vector
        - fusions_size: size of the fusion vector
Inputs:
- input: (seq_len, batch, input_size)
- fusions: (seq_len, batch, fusions_size)
Outputs:
- output: (seq_len, batch, input_size)
"""
def __init__(self, input_size, fusions_size):
super(SFUNew, self).__init__()
self.linear_r = torch.nn.Linear(input_size + fusions_size, input_size)
self.linear_g = torch.nn.Linear(input_size + fusions_size, input_size)
def forward(self, input_0, input_1):
primals_3 = self.linear_r.weight
primals_4 = self.linear_r.bias
primals_5 = self.linear_g.weight
primals_6 = self.linear_g.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| xdong73S/Match_LSTM_v2.0 | SFU | false | 4,570 | [
"MIT"
] | 0 | dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | https://github.com/xdong73S/Match_LSTM_v2.0/tree/dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | import torch
import torch.utils.data
import torch.nn.functional as F
class Model(torch.nn.Module):
"""
    Only two inputs: one input vector and one fusion vector.
    Args:
        - input_size: size of the input vector
        - fusions_size: size of the fusion vector
Inputs:
- input: (seq_len, batch, input_size)
- fusions: (seq_len, batch, fusions_size)
Outputs:
- output: (seq_len, batch, input_size)
"""
def __init__(self, input_size, fusions_size):
super().__init__()
self.linear_r = torch.nn.Linear(input_size + fusions_size, input_size)
self.linear_g = torch.nn.Linear(input_size + fusions_size, input_size)
def forward(self, input, fusions):
m = torch.cat((input, fusions), dim=-1)
r = F.tanh(self.linear_r(m))
g = F.sigmoid(self.linear_g(m))
o = g * r + (1 - g) * input
return o
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SpecialEncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_4, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
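# Hedged reference sketch (added): triton_poi_fused_native_layer_norm_0
# precomputes the per-row statistics of layer norm over the last dimension
# of size 4: the mean (out_ptr0) and rsqrt(var + 1e-05) (out_ptr1). Eager
# equivalent, assuming torch is imported at the top of this module:
def _layer_norm_stats_reference(x):
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    return mean, torch.rsqrt(var + 1e-05)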
# kernel path: runs/run_shard_7/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_4, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_4, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
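# Hedged continuation sketch (added): triton_poi_fused_native_layer_norm_1
# applies the precomputed statistics together with the affine parameters,
# completing the layer norm: (x - mean) * rstd * weight + bias.
def _layer_norm_apply_reference(x, mean, rstd, weight, bias):
    return (x - mean) * rstd * weight + bias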
# kernel path: runs/run_shard_7/inductor_cache/z5/cz5xs7y3thsep5yn6qoths757rduuevog6mtea3nqr4nwnh2olnx.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 8
x2 = (xindex // 32)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (8 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (24 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
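# Hedged reference sketch (added): triton_poi_fused__softmax_2 computes only
# the shifted numerator exp(x - amax(x, dim=1)) of a softmax taken over
# dim 1 of a (4, 4, 8) view of the scores; the division by the row sum is
# performed by a later kernel. Eager equivalent:
def _softmax_numerator_reference(x):
    return (x - x.amax(dim=1, keepdim=True)).exp()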
# kernel path: runs/run_shard_7/inductor_cache/42/c422tkkkbumvpbbws6fmqskkh22zkquxu5sr3arajoh4o6d6tnnw.py
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax_1 => amax_1, exp_1, sub_2
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [2], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 2
x2 = (xindex // 8)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (8*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (2 + x0 + (8*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (4 + x0 + (8*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (6 + x0 + (8*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
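# Reading of triton_poi_fused__softmax_3 above: the same stabilized-numerator
# pattern, but the stride-2 loads reduce over dim=2, matching
# F.softmax(self.proj_pair_2(src), dim=2) in the source module; normalization
# is again deferred to the cat kernel below.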
# kernel path: runs/run_shard_7/inductor_cache/sd/csdgsdl6mmpvoragwfnwhbdsep3m6mgkzrjf5q5jlwtxogqlbhpu.py
# Topologically Sorted Source Nodes: [attn_map], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# attn_map => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%permute_1, %permute_3], 1), kwargs = {})
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex % 16
x0 = xindex % 4
x1 = (xindex // 4) % 4
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x4) + (32*x3) + x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + ((2*x0) + (32*x3) + x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr0 + (8 + (2*x0) + (32*x3) + x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x3) + x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tl.load(in_ptr0 + (24 + (2*x0) + (32*x3) + x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tmp5 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr1 + ((2*x1) + (8*x0) + (32*x3) + ((-2) + x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + ((8*x0) + (32*x3) + ((-2) + x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr1 + (2 + (8*x0) + (32*x3) + ((-2) + x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.load(in_ptr1 + (4 + (8*x0) + (32*x3) + ((-2) + x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp22 + tmp23
tmp25 = tl.load(in_ptr1 + (6 + (8*x0) + (32*x3) + ((-2) + x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp19 / tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp16, tmp27, tmp28)
tmp30 = tl.where(tmp4, tmp15, tmp29)
tl.store(out_ptr0 + (x5), tmp30, xmask)
''', device_str='cuda')
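# Reading of triton_poi_fused_cat_4 above: it finishes both softmaxes (each exp
# value is divided by the sum of the four exps along its axis, tmp5/tmp12 and
# tmp19/tmp26) while writing torch.cat((map1.permute(0, 3, 1, 2),
# map2.permute(0, 3, 2, 1)), dim=1) into one (4, 4, 4, 4) buffer; the x2 < 2
# branch selects which half-head map feeds each output channel.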
# kernel path: runs/run_shard_7/inductor_cache/ur/curih3fwuu3mdlxzllk3sntxux74abqsvpj3fu4yqkjlccckj7nm.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
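# Reading of triton_poi_fused_clone_5 above: a bias add fused into a layout
# change -- in_ptr1 is broadcast per feature (y0), and the store pattern
# appears to realize the permute(0, 3, 1, 2).contiguous() copy that prepares
# the value tensor for the batched matmul with the attention maps.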
# kernel path: runs/run_shard_7/inductor_cache/f5/cf54cp2yj2nqsdqclstksyuxqtb64paty3icoqv6sr4eroweijif.py
# Topologically Sorted Source Nodes: [tgt2_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# tgt2_2 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
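# Reading of triton_poi_fused_clone_6 above: a pure transpose copy with no
# arithmetic, apparently materializing the .permute(0, 2, 3, 1) of the bmm
# result so the following proj_out matrix multiply sees a contiguous operand.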
# kernel path: runs/run_shard_7/inductor_cache/zc/czcnoplfggoe6gxdkbbytfhttxmvudgflsx4nblpci3ddmeh4oon.py
# Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_2 => var_mean_2
# Graph fragment:
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_15, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_native_layer_norm_7 = async_compile.triton('triton_poi_fused_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
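# Reading of triton_poi_fused_native_layer_norm_7 above: a single-pass mean and
# variance (correction=0) over the size-4 last dim of the residual sum
# in_ptr0 + (in_ptr1 + in_ptr2), i.e. tgt plus the biased projection output.
# Unlike the earlier layer-norm stats kernel, out_ptr1 holds the raw variance
# (tmp40); eps and rsqrt are applied in triton_poi_fused_native_layer_norm_8.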
# kernel path: runs/run_shard_7/inductor_cache/ss/css6ndidxpg5b6ao3c3ca332wl3q6ntycluvcxlfl3rq7gtlelis.py
# Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_2 => add_6, add_7, mul_4, mul_5, rsqrt_2, sub_4
# Graph fragment:
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_15, %getitem_5), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %rsqrt_2), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %primals_15), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %primals_16), kwargs = {})
triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
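# Reading of triton_poi_fused_native_layer_norm_8 above: the normalization
# proper -- it recomputes the residual sum, subtracts the mean (in_ptr3),
# scales by rsqrt(var + 1e-05) using the stats from the previous kernel, then
# applies the LayerNorm affine weight (in_ptr5) and bias (in_ptr6).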
# kernel path: runs/run_shard_7/inductor_cache/5s/c5stk5glscu2vhkyygzl37kwox56bq47ibhaq46siyunrac2divm.py
# Topologically Sorted Source Nodes: [relu_], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu_ => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_18,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_20, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
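# Reading of triton_poi_fused_relu_threshold_backward_9 above: the linear1 bias
# add is fused with an in-place ReLU, and the boolean mask (out <= 0) that
# autograd's threshold_backward consumes is stored eagerly alongside it.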
# kernel path: runs/run_shard_7/inductor_cache/cs/ccsko6axmghf5opun7oldfuzwck2miyrvybdwzv5jxatbhz5oca4.py
# Topologically Sorted Source Nodes: [tgt2_4], Original ATen: [aten.view]
# Source node to ATen node mapping:
# tgt2_4 => view_22
# Graph fragment:
# %view_22 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_20, [64, 4]), kwargs = {})
triton_poi_fused_view_10 = async_compile.triton('triton_poi_fused_view_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
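# Reading of triton_poi_fused_view_10 above: for these fixed shapes the index
# expression collapses to an identity (x1 % 4 // 4 == 0 and the final // 16
# term is always 0), so this is a plain contiguous copy that materializes the
# (4, 4, 4, 4) ReLU output as the (64, 4) matrix consumed by the next mm.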
# kernel path: runs/run_shard_7/inductor_cache/3n/c3nfizw2ntzxzaq4lgbgvecmi2qgnixh7vzgsb34mjjrfelxsr22.py
# Topologically Sorted Source Nodes: [tgt2_2, tgt, tgt_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# tgt => add_5
# tgt2_2 => add_4
# tgt_1 => add_8
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %primals_14), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_4), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %view_23), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
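# Reading of triton_poi_fused_add_11 above: both residual additions of the
# layer fused into one kernel -- in place it computes
# (tgt + (mm_out + bias)) + (ffn_out + bias), i.e. the two tgt = tgt + tgt2
# updates. The dropout modules between the adds leave no ops in this graph,
# so they act as identities here.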
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (2, 4), (4, 1))
assert_size_stride(primals_6, (2, ), (1, ))
assert_size_stride(primals_7, (2, 4), (4, 1))
assert_size_stride(primals_8, (2, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4, ), (1, ))
assert_size_stride(primals_19, (4, 4), (4, 1))
assert_size_stride(primals_20, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_4, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_4, buf0, buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 128, grid=grid(128), stream=stream0)
buf5 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf5, buf6, 128, grid=grid(128), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_map], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(buf4, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf4
del buf6
buf8 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 64), 0); del buf1 # reuse
buf9 = reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 64), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(primals_1, buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf8, buf9, primals_9, primals_10, buf10, 256, grid=grid(256), stream=stream0)
del primals_10
del primals_9
buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf11, primals_12, buf12, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_12
buf13 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt2_2], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf13, buf14, 64, 4, grid=grid(64, 4), stream=stream0)
buf15 = reinterpret_tensor(buf13, (64, 4), (4, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [tgt2_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf14, (64, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf15)
buf16 = buf9; del buf9 # reuse
buf17 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_7.run(primals_1, buf15, primals_14, buf16, buf17, 64, grid=grid(64), stream=stream0)
buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_8.run(primals_1, buf15, primals_14, buf16, buf17, primals_15, primals_16, buf18, 256, grid=grid(256), stream=stream0)
del buf16
del buf17
del primals_16
buf19 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf18, (64, 4), (4, 1), 0), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf19 # reuse
buf24 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu_], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_9.run(buf20, primals_18, buf24, 256, grid=grid(256), stream=stream0)
del primals_18
buf21 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt2_4], Original ATen: [aten.view]
triton_poi_fused_view_10.run(buf20, buf21, 256, grid=grid(256), stream=stream0)
buf22 = reinterpret_tensor(buf20, (64, 4), (4, 1), 0); del buf20 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf21, reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf22)
buf23 = reinterpret_tensor(buf22, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf22 # reuse
# Topologically Sorted Source Nodes: [tgt2_2, tgt, tgt_1], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf23, primals_1, buf15, primals_14, primals_20, 256, grid=grid(256), stream=stream0)
del primals_20
return (buf23, primals_1, primals_4, primals_14, primals_15, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, buf5, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(buf14, (64, 4), (4, 1), 0), buf15, reinterpret_tensor(buf18, (64, 4), (4, 1), 0), buf21, primals_19, buf24, primals_17, primals_13, reinterpret_tensor(buf12, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), primals_11, primals_7, primals_5, )
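# Note: call() returns the layer output (buf23) first; the remaining tensors
# are, presumably, the inputs, parameters and intermediates saved for the
# backward pass of this compiled region.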
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SpecialEncoderLayer(nn.Module):
def __init__(self, heads, d_in, d_out, d_ff, p_drop=0.1):
super(SpecialEncoderLayer, self).__init__()
self.heads = heads
self.norm = nn.LayerNorm(d_in)
self.proj_pair_1 = nn.Linear(d_in, heads // 2)
self.proj_pair_2 = nn.Linear(d_in, heads // 2)
self.proj_msa = nn.Linear(d_out, d_out)
self.proj_out = nn.Linear(d_out, d_out)
self.drop_1 = nn.Dropout(p_drop)
self.drop_2 = nn.Dropout(p_drop)
self.drop_3 = nn.Dropout(p_drop)
self.linear1 = nn.Linear(d_out, d_ff)
self.dropout = nn.Dropout(p_drop)
self.linear2 = nn.Linear(d_ff, d_out)
self.norm1 = nn.LayerNorm(d_out)
self.norm2 = nn.LayerNorm(d_out)
def forward(self, src, tgt):
B, N, L = tgt.shape[:3]
src = self.norm(src)
attn_map_1 = F.softmax(self.proj_pair_1(src), dim=1).permute(0, 3, 1, 2
)
attn_map_2 = F.softmax(self.proj_pair_2(src), dim=2).permute(0, 3, 2, 1
)
attn_map = torch.cat((attn_map_1, attn_map_2), dim=1)
attn_map = self.drop_1(attn_map).unsqueeze(1)
tgt2 = self.norm1(tgt.view(B * N, L, -1)).view(B, N, L, -1)
value = self.proj_msa(tgt2).permute(0, 3, 1, 2).contiguous().view(B,
-1, self.heads, N, L)
tgt2 = torch.matmul(value, attn_map).view(B, -1, N, L).permute(0, 2,
3, 1)
tgt2 = self.proj_out(tgt2)
tgt = tgt + self.drop_2(tgt2)
tgt2 = self.norm2(tgt.view(B * N, L, -1)).view(B, N, L, -1)
tgt2 = self.linear2(self.dropout(F.relu_(self.linear1(tgt2))))
tgt = tgt + self.drop_3(tgt2)
return tgt
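# Shape sketch for the forward above (hedged, read off the code): src supplies
# pair features from which two attention maps of heads // 2 channels each are
# built, softmax-normalized along dim=1 and dim=2 respectively, permuted to
# (B, heads // 2, L, L) and concatenated to heads maps; tgt is projected to a
# per-head value tensor and mixed with those maps via torch.matmul before the
# residual and feed-forward blocks. Usage sketch (shapes from get_inputs below):
#   layer = SpecialEncoderLayer(heads=4, d_in=4, d_out=4, d_ff=4)
#   out = layer(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))  # (4, 4, 4, 4)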
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'heads': 4, 'd_in': 4, 'd_out': 4, 'd_ff': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 8
x2 = xindex // 32
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 2
x2 = xindex // 8
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 8 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (2 + x0 + 8 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (4 + x0 + 8 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (6 + x0 + 8 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex % 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x4 + 32 * x3 + x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (2 * x0 + 32 * x3 + x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr0 + (8 + 2 * x0 + 32 * x3 + x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x3 + x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tl.load(in_ptr0 + (24 + 2 * x0 + 32 * x3 + x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tmp5 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr1 + (2 * x1 + 8 * x0 + 32 * x3 + (-2 + x2)),
tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (8 * x0 + 32 * x3 + (-2 + x2)), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr1 + (2 + 8 * x0 + 32 * x3 + (-2 + x2)), tmp16 &
xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tmp20 + tmp21
tmp23 = tl.load(in_ptr1 + (4 + 8 * x0 + 32 * x3 + (-2 + x2)), tmp16 &
xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp22 + tmp23
tmp25 = tl.load(in_ptr1 + (6 + 8 * x0 + 32 * x3 + (-2 + x2)), tmp16 &
xmask, eviction_policy='evict_last', other=0.0)
tmp26 = tmp24 + tmp25
tmp27 = tmp19 / tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp16, tmp27, tmp28)
tmp30 = tl.where(tmp4, tmp15, tmp29)
tl.store(out_ptr0 + x5, tmp30, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
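# Note: this second listing is the de-sugared form of the kernels above -- the
# @triton_heuristics.pointwise autotuning decorators are stripped, so each
# launch in call() below passes an explicit grid, XBLOCK, num_warps and
# num_stages instead of relying on the pointwise heuristic.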
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (2, 4), (4, 1))
assert_size_stride(primals_6, (2,), (1,))
assert_size_stride(primals_7, (2, 4), (4, 1))
assert_size_stride(primals_8, (2,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4, 4), (4, 1))
assert_size_stride(primals_20, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_4, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_4, buf0,
buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_5, (4, 2), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_6
buf4 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused__softmax_2[grid(128)](buf3, buf4, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_7, (4, 2), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused__softmax_3[grid(128)](buf5, buf6, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_cat_4[grid(256)](buf4, buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf4
del buf6
buf8 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 64), 0)
del buf1
buf9 = reinterpret_tensor(buf0, (16, 4, 1), (4, 1, 64), 0)
del buf0
triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf8,
buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf8,
buf9, primals_9, primals_10, buf10, 256, XBLOCK=256, num_warps=
4, num_stages=1)
del primals_10
del primals_9
buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_5[grid(16, 16)](buf11, primals_12, buf12, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_12
buf13 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0)
del buf11
extern_kernels.bmm(reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf13)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_6[grid(64, 4)](buf13, buf14, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf15 = reinterpret_tensor(buf13, (64, 4), (4, 1), 0)
del buf13
extern_kernels.mm(reinterpret_tensor(buf14, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf15)
buf16 = buf9
del buf9
buf17 = buf8
del buf8
triton_poi_fused_native_layer_norm_7[grid(64)](primals_1, buf15,
primals_14, buf16, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_8[grid(256)](primals_1, buf15,
primals_14, buf16, buf17, primals_15, primals_16, buf18, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf16
del buf17
del primals_16
buf19 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf19
buf24 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_9[grid(256)](buf20,
primals_18, buf24, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_18
buf21 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_10[grid(256)](buf20, buf21, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf22 = reinterpret_tensor(buf20, (64, 4), (4, 1), 0)
del buf20
extern_kernels.mm(buf21, reinterpret_tensor(primals_19, (4, 4), (1,
4), 0), out=buf22)
buf23 = reinterpret_tensor(buf22, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf22
triton_poi_fused_add_11[grid(256)](buf23, primals_1, buf15,
primals_14, primals_20, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_20
return (buf23, primals_1, primals_4, primals_14, primals_15,
reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, buf5,
reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(
buf14, (64, 4), (4, 1), 0), buf15, reinterpret_tensor(buf18, (64, 4
), (4, 1), 0), buf21, primals_19, buf24, primals_17, primals_13,
reinterpret_tensor(buf12, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), primals_11,
primals_7, primals_5)
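# Note: the wrapper class below only re-routes nn.Module state -- forward()
# gathers the parameters into the positional primals_* slots that call()
# expects (the numbering apparently mirrors the original trace, which is why
# e.g. proj_msa.weight lands in primals_11) and returns output[0], the
# updated tgt tensor.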
class SpecialEncoderLayerNew(nn.Module):
def __init__(self, heads, d_in, d_out, d_ff, p_drop=0.1):
super(SpecialEncoderLayerNew, self).__init__()
self.heads = heads
self.norm = nn.LayerNorm(d_in)
self.proj_pair_1 = nn.Linear(d_in, heads // 2)
self.proj_pair_2 = nn.Linear(d_in, heads // 2)
self.proj_msa = nn.Linear(d_out, d_out)
self.proj_out = nn.Linear(d_out, d_out)
self.drop_1 = nn.Dropout(p_drop)
self.drop_2 = nn.Dropout(p_drop)
self.drop_3 = nn.Dropout(p_drop)
self.linear1 = nn.Linear(d_out, d_ff)
self.dropout = nn.Dropout(p_drop)
self.linear2 = nn.Linear(d_ff, d_out)
self.norm1 = nn.LayerNorm(d_out)
self.norm2 = nn.LayerNorm(d_out)
def forward(self, input_0, input_1):
primals_2 = self.norm.weight
primals_3 = self.norm.bias
primals_5 = self.proj_pair_1.weight
primals_6 = self.proj_pair_1.bias
primals_7 = self.proj_pair_2.weight
primals_8 = self.proj_pair_2.bias
primals_11 = self.proj_msa.weight
primals_9 = self.proj_msa.bias
primals_13 = self.proj_out.weight
primals_10 = self.proj_out.bias
primals_17 = self.linear1.weight
primals_12 = self.linear1.bias
primals_19 = self.linear2.weight
primals_14 = self.linear2.bias
primals_15 = self.norm1.weight
primals_16 = self.norm1.bias
primals_18 = self.norm2.weight
primals_20 = self.norm2.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0]
| wukevin/RoseTTAFold | SpecialEncoderLayer | false | 4571 | ["MIT"] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, heads, d_in, d_out, d_ff, p_drop=0.1):
super().__init__()
self.heads = heads
self.norm = nn.LayerNorm(d_in)
self.proj_pair_1 = nn.Linear(d_in, heads // 2)
self.proj_pair_2 = nn.Linear(d_in, heads // 2)
self.proj_msa = nn.Linear(d_out, d_out)
self.proj_out = nn.Linear(d_out, d_out)
self.drop_1 = nn.Dropout(p_drop)
self.drop_2 = nn.Dropout(p_drop)
self.drop_3 = nn.Dropout(p_drop)
self.linear1 = nn.Linear(d_out, d_ff)
self.dropout = nn.Dropout(p_drop)
self.linear2 = nn.Linear(d_ff, d_out)
self.norm1 = nn.LayerNorm(d_out)
self.norm2 = nn.LayerNorm(d_out)
def forward(self, src, tgt):
B, N, L = tgt.shape[:3]
src = self.norm(src)
attn_map_1 = F.softmax(self.proj_pair_1(src), dim=1).permute(0, 3, 1, 2
)
attn_map_2 = F.softmax(self.proj_pair_2(src), dim=2).permute(0, 3, 2, 1
)
attn_map = torch.cat((attn_map_1, attn_map_2), dim=1)
attn_map = self.drop_1(attn_map).unsqueeze(1)
tgt2 = self.norm1(tgt.view(B * N, L, -1)).view(B, N, L, -1)
value = self.proj_msa(tgt2).permute(0, 3, 1, 2).contiguous().view(B,
-1, self.heads, N, L)
tgt2 = torch.matmul(value, attn_map).view(B, -1, N, L).permute(0, 2,
3, 1)
tgt2 = self.proj_out(tgt2)
tgt = tgt + self.drop_2(tgt2)
tgt2 = self.norm2(tgt.view(B * N, L, -1)).view(B, N, L, -1)
tgt2 = self.linear2(self.dropout(F.relu_(self.linear1(tgt2))))
tgt = tgt + self.drop_3(tgt2)
return tgt
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 4]
|
SeqToSeqAtten | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/pi/cpimpjbrrsowg4vcexq5tvi5ak5vgs3g6i46pzg632y26q6cqe7k.py
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum]
# Source node to ATen node mapping:
# e_x => exp
# e_x_1 => mul_1
# max_1 => max_1
# sub => sub
# sum_1 => sum_1
# x => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, %unsqueeze), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 2, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %unsqueeze), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2], True), kwargs = {})
triton_poi_fused_exp_max_mul_sub_sum_0 = async_compile.triton('triton_poi_fused_exp_max_mul_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_max_mul_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_max_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 * tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 * tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 * tmp1
tmp18 = tmp5 - tmp14
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 * tmp4
tmp21 = tmp17 + tmp20
tmp22 = tmp9 - tmp14
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 * tmp8
tmp25 = tmp21 + tmp24
tmp26 = tmp13 - tmp14
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp27 * tmp12
tmp29 = tmp25 + tmp28
tl.store(out_ptr0 + (x2), tmp14, xmask)
tl.store(out_ptr1 + (x2), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/we/cwecj3jyc6ckuorz3lmfwdm45efqgzsoqiexv7vswdms53agblzv.py
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, add, softmax], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# e_x => exp
# e_x_1 => mul_1
# max_1 => max_1
# softmax => div
# sub => sub
# x => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, %unsqueeze), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 2, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %unsqueeze), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add), kwargs = {})
triton_poi_fused_add_div_exp_max_mul_sub_1 = async_compile.triton('triton_poi_fused_add_div_exp_max_mul_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_max_mul_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_exp_max_mul_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
x4 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp1
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = tmp6 / tmp9
tl.store(in_out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (4, 16, 1), 0), reinterpret_tensor(arg1_1, (4, 4, 4), (4, 1, 16), 0), out=buf0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_exp_max_mul_sub_sum_0.run(buf0, arg2_1, buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, add, softmax], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.add, aten.div]
triton_poi_fused_add_div_exp_max_mul_sub_1.run(buf3, arg2_1, buf1, buf2, 64, grid=grid(64), stream=stream0)
del arg2_1
del buf1
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [alpha_seq2], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(arg1_1, (4, 4, 4), (4, 16, 1), 0), out=buf4)
del arg1_1
return (reinterpret_tensor(buf4, (4, 4, 4), (4, 16, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
def masked_softmax(x, m=None, dim=-1):
"""
Softmax with mask
:param x:
:param m:
:param dim:
:return:
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
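# Illustrative usage sketch for masked_softmax (not part of the original
# module; the shapes and values below are made up for the example):
#   x = torch.randn(2, 3)
#   m = torch.tensor([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
#   p = masked_softmax(x, m, dim=-1)
#   assert torch.all(p[m == 0] == 0)  # masked entries get exactly zero weight
#   # each row sums to ~1, up to the 1e-06 term in the denominator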
class SeqToSeqAtten(torch.nn.Module):
"""
Args:
-
Inputs:
- h1: (seq1_len, batch, hidden_size)
- h1_mask: (batch, seq1_len)
- h2: (seq2_len, batch, hidden_size)
- h2_mask: (batch, seq2_len)
Outputs:
- output: (seq1_len, batch, hidden_size)
- alpha: (batch, seq1_len, seq2_len)
"""
def __init__(self):
super(SeqToSeqAtten, self).__init__()
def forward(self, h1, h2, h2_mask):
h1 = h1.transpose(0, 1)
h2 = h2.transpose(0, 1)
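        # alpha: raw attention scores of shape (batch, seq1_len, seq2_len)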
alpha = h1.bmm(h2.transpose(1, 2))
alpha = masked_softmax(alpha, h2_mask.unsqueeze(1), dim=2)
alpha_seq2 = alpha.bmm(h2)
alpha_seq2 = alpha_seq2.transpose(0, 1)
return alpha_seq2, alpha
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_exp_max_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
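    # First pass of the fused masked softmax over rows of 4 elements:
    # computes max(x * m) and sum(exp(x * m - max) * m) per row, the two
    # statistics the normalization kernel below consumes.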
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 * tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 * tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 * tmp1
tmp18 = tmp5 - tmp14
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 * tmp4
tmp21 = tmp17 + tmp20
tmp22 = tmp9 - tmp14
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 * tmp8
tmp25 = tmp21 + tmp24
tmp26 = tmp13 - tmp14
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp27 * tmp12
tmp29 = tmp25 + tmp28
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp29, xmask)
@triton.jit
def triton_poi_fused_add_div_exp_max_mul_sub_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
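    # Second pass: recomputes exp(x * m - max) * m per element and divides by
    # the precomputed row sum plus 1e-06, overwriting the scores in place.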
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp1
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = tmp6 / tmp9
tl.store(in_out_ptr0 + x3, tmp10, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
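        # alpha = h1 @ h2^T: both transpose(0, 1) calls and h2's
        # transpose(1, 2) are folded into the strides of the
        # reinterpret_tensor views, so no data is copied.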
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 4), (4, 16, 1),
0), reinterpret_tensor(arg1_1, (4, 4, 4), (4, 1, 16), 0), out=buf0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_max_mul_sub_sum_0[grid(16)](buf0, arg2_1, buf1,
buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = buf0
del buf0
triton_poi_fused_add_div_exp_max_mul_sub_1[grid(64)](buf3, arg2_1,
buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg2_1
del buf1
del buf2
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
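        # alpha_seq2 = alpha @ h2; the final transpose(0, 1) back to
        # (seq1_len, batch, hidden) is the strided view in the return below.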
extern_kernels.bmm(buf3, reinterpret_tensor(arg1_1, (4, 4, 4), (4,
16, 1), 0), out=buf4)
del arg1_1
return reinterpret_tensor(buf4, (4, 4, 4), (4, 16, 1), 0), buf3
def masked_softmax(x, m=None, dim=-1):
"""
Softmax with mask
:param x:
:param m:
:param dim:
:return:
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class SeqToSeqAttenNew(torch.nn.Module):
"""
Args:
-
Inputs:
- h1: (seq1_len, batch, hidden_size)
- h1_mask: (batch, seq1_len)
- h2: (seq2_len, batch, hidden_size)
- h2_mask: (batch, seq2_len)
Outputs:
- output: (seq1_len, batch, hidden_size)
- alpha: (batch, seq1_len, seq2_len)
"""
def __init__(self):
super(SeqToSeqAttenNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
| xdong73S/Match_LSTM_v2.0 | SeqToSeqAtten | false | 4,572 | ["MIT"] | 0 | dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | https://github.com/xdong73S/Match_LSTM_v2.0/tree/dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | import torch
import torch.utils.data
def masked_softmax(x, m=None, dim=-1):
"""
Softmax with mask
:param x:
:param m:
:param dim:
:return:
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class Model(torch.nn.Module):
"""
Args:
-
Inputs:
- h1: (seq1_len, batch, hidden_size)
- h1_mask: (batch, seq1_len)
- h2: (seq2_len, batch, hidden_size)
- h2_mask: (batch, seq2_len)
Outputs:
- output: (seq1_len, batch, hidden_size)
- alpha: (batch, seq1_len, seq2_len)
"""
def __init__(self):
super().__init__()
def forward(self, h1, h2, h2_mask):
h1 = h1.transpose(0, 1)
h2 = h2.transpose(0, 1)
alpha = h1.bmm(h2.transpose(1, 2))
alpha = masked_softmax(alpha, h2_mask.unsqueeze(1), dim=2)
alpha_seq2 = alpha.bmm(h2)
alpha_seq2 = alpha_seq2.transpose(0, 1)
return alpha_seq2, alpha
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
EncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [src2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src2 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
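    # Per row of 4 elements (the normalized dimension) this computes the mean
    # and rsqrt(var + 1e-05) for LayerNorm; the affine scale/shift is applied
    # by the next kernel.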
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [src2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src2 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/f7/cf7sn2zuocejdqbnd3mvanomh5vpl5kzmdrwuhcmdcgbrq5f6jhe.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/fm/cfmciqu6iddc3els2hqmyf3guz6ufjk5fyfagjmrrmgnbo4der6d.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_2, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_2, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ps/cpsvdl7kaggvl22ouohpv3etytpfj5pvanq43jur444ykdyrszcl.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_2, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([16, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default_1, %div_tensor), kwargs = {})
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
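    # Softmax normalization with an all-masked guard: a row whose four scores
    # are all -inf would produce 0/0, so such rows are detected and written
    # out as zeros instead.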
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pz/cpzvm5pqqkfkzq5s4qhnnqoumohxspeeg42cxw6jyx2vq7ujo6ty.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/je/cje5rciemo4ccfstwfifaqepc6lg5nsfeupef7hkbzdljrb5654q.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7f/c7f4kwch2ginirg5uowxomfgvcczsc2novwstiyjr473exwmdrbu.py
# Topologically Sorted Source Nodes: [src, src2_3], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add_2
# src2_3 => var_mean_1
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_19), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [3]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
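    # Fuses the residual add (src + attention output) with the mean/variance
    # reduction of the second LayerNorm over the last dimension of size 4.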
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cf/ccfpoxdlvqmljccicjmql4or4ugeje3nnzn2ursuge5av2lvenrw.py
# Topologically Sorted Source Nodes: [src, src2_3], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add_2
# src2_3 => add_3, add_4, mul_2, mul_3, rsqrt_1, sub_2
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_19), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_12), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_13), kwargs = {})
triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5s/c5stk5glscu2vhkyygzl37kwox56bq47ibhaq46siyunrac2divm.py
# Topologically Sorted Source Nodes: [relu_], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu_ => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_21,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_23, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
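    # Fuses the bias add of linear1 with ReLU and records the (output <= 0)
    # mask consumed by threshold_backward in autograd.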
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cs/ccsko6axmghf5opun7oldfuzwck2miyrvybdwzv5jxatbhz5oca4.py
# Topologically Sorted Source Nodes: [src2_4], Original ATen: [aten.view]
# Source node to ATen node mapping:
# src2_4 => view_25
# Graph fragment:
# %view_25 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_23, [64, 4]), kwargs = {})
triton_poi_fused_view_10 = async_compile.triton('triton_poi_fused_view_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bx/cbxptlm6luu7k3qqnq7vihfeuhjpbejv5kahnjecvtbiyu5filud.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# src => add_2
# src_1 => add_5
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_19), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_26), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [src2], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf3, primals_5, buf6, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_5
buf7 = reinterpret_tensor(buf3, (16, 4, 1, 4), (16, 4, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf4, primals_7, buf7, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf6, (64, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf7, (64, 1, 4), (4, 0, 1), 0), out=buf8)
buf9 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf8, buf9, 1024, grid=grid(1024), stream=stream0)
buf10 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(buf8, buf9, buf10, 1024, grid=grid(1024), stream=stream0)
del buf8
del buf9
buf11 = reinterpret_tensor(buf4, (16, 4, 4, 1), (16, 4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(buf5, primals_9, buf11, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_9
buf12 = reinterpret_tensor(buf5, (64, 4, 1), (4, 1, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf10, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (64, 4, 1), (4, 1, 0), 0), out=buf12)
buf13 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf12, buf13, 64, 4, grid=grid(64, 4), stream=stream0)
buf14 = reinterpret_tensor(buf12, (64, 4), (4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf13, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14)
del primals_11
buf15 = buf1; del buf1 # reuse
buf16 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [src, src2_3], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf14, buf15, buf16, 64, grid=grid(64), stream=stream0)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src, src2_3], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_8.run(primals_1, buf14, buf15, buf16, primals_12, primals_13, buf17, 256, grid=grid(256), stream=stream0)
del buf15
del buf16
del primals_13
buf18 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf17, (64, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf18 # reuse
buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu_], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_9.run(buf19, primals_15, buf23, 256, grid=grid(256), stream=stream0)
del primals_15
buf20 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src2_4], Original ATen: [aten.view]
triton_poi_fused_view_10.run(buf19, buf20, 256, grid=grid(256), stream=stream0)
buf21 = reinterpret_tensor(buf19, (64, 4), (4, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf20, reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf22, primals_1, buf14, primals_17, 256, grid=grid(256), stream=stream0)
del primals_17
return (buf22, primals_1, primals_12, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf10, reinterpret_tensor(buf11, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf7, (64, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf13, (64, 4), (4, 1), 0), buf14, reinterpret_tensor(buf17, (64, 4), (4, 1), 0), buf20, primals_16, buf23, primals_14, primals_10, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def orthogonal_matrix_chunk(cols, qr_uniform_q=False, device=None):
unstructured_block = torch.randn((cols, cols), device=device)
q, r = torch.linalg.qr(unstructured_block.cpu(), 'reduced')
q, r = map(lambda t: t, (q, r))
if qr_uniform_q:
d = torch.diag(r, 0)
q *= d.sign()
return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0,
qr_uniform_q=False, device=None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim
=1)
elif scaling == 1:
multiplier = math.sqrt(float(nb_columns)) * torch.ones((nb_rows,),
device=device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
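# Illustrative sketch (added; not part of the original RoseTTAFold module):
# every nb_columns-row block produced above has orthonormal rows before the
# multiplier is applied, so undoing the scaling=1 factor recovers Q @ Q.T = I
# within a block.
def _demo_orthogonal_blocks():
    m = gaussian_orthogonal_random_matrix(8, 4, scaling=1)
    block = m[:4] / math.sqrt(4.0)  # undo the sqrt(nb_columns) multiplier
    gram = block @ block.t()
    assert torch.allclose(gram, torch.eye(4), atol=1e-05)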
def generalized_kernel(data, *, projection_matrix, kernel_fn=nn.ReLU(
inplace=True), kernel_epsilon=0.001, normalize_data=True, device=None):
_b, _h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
data = data_normalizer * data
data = torch.matmul(data, projection_matrix.T)
data = kernel_fn(data) + kernel_epsilon
return data.type_as(data)
def linear_attention(q, k, v):
L = k.shape[-2]
D_inv = 1.0 / torch.einsum('...nd,...d->...n', q, k.mean(dim=-2))
context = torch.einsum('...nd,...ne->...de', k / float(L), v)
del k, v
out = torch.einsum('...n,...nd->...nd', D_inv, q)
del D_inv, q
out = torch.einsum('...nd,...de->...ne', out, context)
return out
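# Illustrative sketch (added; not part of the original RoseTTAFold module):
# linear_attention consumes (batch, heads, length, dim) feature maps and
# returns the same shape without materializing the length x length attention
# matrix. Positive inputs stand in for the kernel feature maps it normally
# receives, keeping the D_inv denominator away from zero.
def _demo_linear_attention_shapes():
    q = torch.rand(2, 4, 16, 8) + 0.1
    k = torch.rand(2, 4, 16, 8) + 0.1
    v = torch.randn(2, 4, 16, 8)
    out = linear_attention(q, k, v)
    assert out.shape == (2, 4, 16, 8)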
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=
True, eps=0.0001, device=None):
b, h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
ratio = projection_matrix.shape[0] ** -0.5
projection = projection_matrix.unsqueeze(0).repeat(h, 1, 1)
projection = projection.unsqueeze(0).repeat(b, 1, 1, 1)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', data_normalizer * data,
projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = diag_data / 2.0 * data_normalizer ** 2
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash, dim=-1, keepdim=True).values) + eps)
else:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash)) + eps)
return data_dash.type_as(data)
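# Illustrative sketch (added; not part of the original RoseTTAFold module):
# the FAVOR+ feature map above is strictly positive (exp(...) plus eps), which
# is what keeps the D_inv normalizer in linear_attention finite.
def _demo_softmax_kernel_positive():
    proj = gaussian_orthogonal_random_matrix(16, 8)
    x = torch.randn(2, 4, 10, 8)
    feats = softmax_kernel(x, projection_matrix=proj, is_query=True)
    assert feats.shape == (2, 4, 10, 16)
    assert bool((feats > 0).all())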
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module,
type)]
def get_module_device(module):
return next(module.parameters()).device
class MultiheadAttention(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super(MultiheadAttention, self).__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim, d_model)
self.to_value = nn.Linear(v_dim, d_model)
self.to_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value):
batch, L = query.shape[:2]
q = self.to_query(query).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
k = self.to_key(key).view(batch, L, self.heads, self.d_k).permute(0,
2, 1, 3)
v = self.to_value(value).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
attention = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
out = torch.matmul(attention, v)
out = out.permute(0, 2, 1, 3).contiguous().view(batch, L, -1)
out = self.to_out(out)
return out
class FastAttention(nn.Module):
def __init__(self, dim_heads, nb_features=None, ortho_scaling=0,
generalized_attention=False, kernel_fn=nn.ReLU(inplace=True),
qr_uniform_q=False, no_projection=False):
super().__init__()
nb_features = default(nb_features, int(dim_heads * math.log(dim_heads))
)
self.dim_heads = dim_heads
self.nb_features = nb_features
self.ortho_scaling = ortho_scaling
if not no_projection:
self.create_projection = partial(gaussian_orthogonal_random_matrix,
nb_rows=self.nb_features, nb_columns=dim_heads, scaling=
ortho_scaling, qr_uniform_q=qr_uniform_q)
projection_matrix = self.create_projection()
self.register_buffer('projection_matrix', projection_matrix)
self.generalized_attention = generalized_attention
self.kernel_fn = kernel_fn
self.no_projection = no_projection
@torch.no_grad()
def redraw_projection_matrix(self, device):
projections = self.create_projection(device=device)
self.projection_matrix.copy_(projections)
del projections
def forward(self, q, k, v):
device = q.device
if self.no_projection:
q = q.softmax(dim=-1)
            k = k.softmax(dim=-2)  # assign the softmax back to k; the bare call was a no-op
elif self.generalized_attention:
create_kernel = partial(generalized_kernel, kernel_fn=self.
kernel_fn, projection_matrix=self.projection_matrix, device
=device)
q, k = map(create_kernel, (q, k))
else:
create_kernel = partial(softmax_kernel, projection_matrix=self.
projection_matrix, device=device)
q = create_kernel(q, is_query=True)
k = create_kernel(k, is_query=False)
attn_fn = linear_attention
out = attn_fn(q, k, v)
return out
class SelfAttention(nn.Module):
def __init__(self, dim, k_dim=None, heads=8, local_heads=0,
local_window_size=256, nb_features=None, feature_redraw_interval=
1000, generalized_attention=False, kernel_fn=nn.ReLU(inplace=True),
qr_uniform_q=False, dropout=0.0, no_projection=False):
super().__init__()
assert dim % heads == 0, 'dimension must be divisible by number of heads'
dim_head = dim // heads
inner_dim = dim_head * heads
if k_dim is None:
k_dim = dim
self.fast_attention = FastAttention(dim_head, nb_features,
generalized_attention=generalized_attention, kernel_fn=
kernel_fn, qr_uniform_q=qr_uniform_q, no_projection=no_projection)
self.heads = heads
self.dim = dim
self.to_query = nn.Linear(dim, inner_dim)
self.to_key = nn.Linear(k_dim, inner_dim)
self.to_value = nn.Linear(k_dim, inner_dim)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout, inplace=True)
self.feature_redraw_interval = feature_redraw_interval
self.register_buffer('calls_since_last_redraw', torch.tensor(0))
self.max_tokens = 2 ** 16
def check_redraw_projections(self):
if not self.training:
return
if exists(self.feature_redraw_interval
) and self.calls_since_last_redraw >= self.feature_redraw_interval:
device = get_module_device(self)
fast_attentions = find_modules(self, FastAttention)
for fast_attention in fast_attentions:
fast_attention.redraw_projection_matrix(device)
self.calls_since_last_redraw.zero_()
return
self.calls_since_last_redraw += 1
def _batched_forward(self, q, k, v):
b1, h, n1 = q.shape[:3]
out = torch.empty((b1, h, n1, self.dim // h), dtype=q.dtype, device
=q.device)
shift = self.max_tokens // n1
for i_b in range(0, b1, shift):
start = i_b
end = min(i_b + shift, b1)
out[start:end] = self.fast_attention(q[start:end], k[start:end],
v[start:end])
return out
def forward(self, query, key, value, **kwargs):
self.check_redraw_projections()
b1, n1, _, h = *query.shape, self.heads
b2, n2, _, h = *key.shape, self.heads
q = self.to_query(query)
k = self.to_key(key)
v = self.to_value(value)
q = q.reshape(b1, n1, h, -1).permute(0, 2, 1, 3)
k = k.reshape(b2, n2, h, -1).permute(0, 2, 1, 3)
v = v.reshape(b2, n2, h, -1).permute(0, 2, 1, 3)
if b1 * n1 > self.max_tokens or b2 * n2 > self.max_tokens:
out = self._batched_forward(q, k, v)
else:
out = self.fast_attention(q, k, v)
out = out.permute(0, 2, 1, 3).reshape(b1, n1, -1)
out = self.to_out(out)
return self.dropout(out)
class EncoderLayer(nn.Module):
def __init__(self, d_model, d_ff, heads, p_drop=0.1, performer_opts=None):
super(EncoderLayer, self).__init__()
self.use_performer = performer_opts is not None
if self.use_performer:
self.attn = SelfAttention(dim=d_model, heads=heads, dropout=
p_drop, nb_features=64, generalized_attention=True, **
performer_opts)
else:
self.attn = MultiheadAttention(d_model, heads, dropout=p_drop)
self.linear1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(p_drop)
self.linear2 = nn.Linear(d_ff, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(p_drop)
self.dropout2 = nn.Dropout(p_drop)
def forward(self, src):
B, N, L = src.shape[:3]
src2 = self.norm1(src)
src2 = src2.reshape(B * N, L, -1)
src2 = self.attn(src2, src2, src2).reshape(B, N, L, -1)
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(F.relu_(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
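# Illustrative usage sketch (added), matching get_inputs()/get_init_inputs()
# below: one encoder layer applied to a (B, N, L, d_model) tensor.
def _demo_encoder_layer():
    layer = EncoderLayer(d_model=4, d_ff=4, heads=4)
    layer.eval()  # disable dropout so the residual path is deterministic
    src = torch.rand(4, 4, 4, 4)
    assert layer(src).shape == src.shape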
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4, 'heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
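# NOTE (added illustration, not compiler output): the kernel above produces
# per-row LayerNorm statistics over the last dim of size 4 -- out_ptr0 holds
# the mean and out_ptr1 holds rsqrt(var + 1e-5). A PyTorch equivalent:
def _layer_norm_stats_reference(x):
    mean = x.mean(dim=-1)
    var = x.var(dim=-1, unbiased=False)
    return mean, torch.rsqrt(var + 1e-05)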
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
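# NOTE (added illustration, not compiler output): triton_poi_fused_3 and
# triton_poi_fused_4 together form a numerically stable softmax over the last
# dim of 4 logits -- fused_3 computes exp(x - max(x)), fused_4 divides by the
# row sum and zeroes rows whose logits were all -inf. A PyTorch equivalent for
# the ordinary (finite) case:
def _softmax_reference(logits):
    shifted = logits - logits.max(dim=-1, keepdim=True).values
    exp = shifted.exp()
    return exp / exp.sum(dim=-1, keepdim=True)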
@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
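# NOTE (added illustration, not compiler output): the kernel above fuses the
# linear-layer bias add with an in-place ReLU and also stores the
# (output <= 0) mask that threshold_backward reuses in the backward pass.
# A PyTorch equivalent:
def _relu_with_mask_reference(x, bias):
    out = torch.relu(x + bias)
    return out, out <= 0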
@triton.jit
def triton_poi_fused_view_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
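# NOTE (added illustration, not compiler output): the kernel above fuses both
# residual adds of the encoder layer -- src = src + attn_out followed by
# src = src + (ffn_out + bias) -- writing the result over the feed-forward
# output buffer in place. A PyTorch equivalent:
def _double_residual_reference(src, attn_out, ffn_out, bias):
    return (src + attn_out) + (ffn_out + bias)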
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_2[grid(64, 4)](buf3, primals_5, buf6, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_5
buf7 = reinterpret_tensor(buf3, (16, 4, 1, 4), (16, 4, 4, 1), 0)
del buf3
triton_poi_fused_2[grid(64, 4)](buf4, primals_7, buf7, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (64, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf7, (64, 1, 4), (4, 0, 1), 0), out=buf8)
buf9 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_3[grid(1024)](buf8, buf9, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused_4[grid(1024)](buf8, buf9, buf10, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
del buf8
del buf9
buf11 = reinterpret_tensor(buf4, (16, 4, 4, 1), (16, 4, 1, 1), 0)
del buf4
triton_poi_fused_5[grid(64, 4)](buf5, primals_9, buf11, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_9
buf12 = reinterpret_tensor(buf5, (64, 4, 1), (4, 1, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf10, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf11, (64, 4, 1), (4, 1, 0), 0), out=buf12)
buf13 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_6[grid(64, 4)](buf12, buf13, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf14 = reinterpret_tensor(buf12, (64, 4), (4, 1), 0)
del buf12
extern_kernels.addmm(primals_11, reinterpret_tensor(buf13, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_11
buf15 = buf1
del buf1
buf16 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_1, buf14,
buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_8[grid(256)](primals_1,
buf14, buf15, buf16, primals_12, primals_13, buf17, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del buf15
del buf16
del primals_13
buf18 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf18
buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_9[grid(256)](buf19,
primals_15, buf23, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_15
buf20 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_10[grid(256)](buf19, buf20, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf21 = reinterpret_tensor(buf19, (64, 4), (4, 1), 0)
del buf19
extern_kernels.mm(buf20, reinterpret_tensor(primals_16, (4, 4), (1,
4), 0), out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf21
triton_poi_fused_add_11[grid(256)](buf22, primals_1, buf14,
primals_17, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
return (buf22, primals_1, primals_12, reinterpret_tensor(buf2, (64, 4),
(4, 1), 0), buf10, reinterpret_tensor(buf11, (64, 1, 4), (4, 1, 1),
0), reinterpret_tensor(buf6, (64, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf7, (64, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(buf13, (64, 4), (4, 1), 0), buf14,
reinterpret_tensor(buf17, (64, 4), (4, 1), 0), buf20, primals_16,
buf23, primals_14, primals_10, primals_8, primals_6, primals_4)
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def orthogonal_matrix_chunk(cols, qr_uniform_q=False, device=None):
unstructured_block = torch.randn((cols, cols), device=device)
q, r = torch.linalg.qr(unstructured_block.cpu(), 'reduced')
q, r = map(lambda t: t, (q, r))
if qr_uniform_q:
d = torch.diag(r, 0)
q *= d.sign()
return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0,
qr_uniform_q=False, device=None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim
=1)
elif scaling == 1:
multiplier = math.sqrt(float(nb_columns)) * torch.ones((nb_rows,),
device=device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
def generalized_kernel(data, *, projection_matrix, kernel_fn=nn.ReLU(
inplace=True), kernel_epsilon=0.001, normalize_data=True, device=None):
_b, _h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
data = data_normalizer * data
data = torch.matmul(data, projection_matrix.T)
data = kernel_fn(data) + kernel_epsilon
return data.type_as(data)
def linear_attention(q, k, v):
L = k.shape[-2]
D_inv = 1.0 / torch.einsum('...nd,...d->...n', q, k.mean(dim=-2))
context = torch.einsum('...nd,...ne->...de', k / float(L), v)
del k, v
out = torch.einsum('...n,...nd->...nd', D_inv, q)
del D_inv, q
out = torch.einsum('...nd,...de->...ne', out, context)
return out
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=
True, eps=0.0001, device=None):
b, h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
ratio = projection_matrix.shape[0] ** -0.5
projection = projection_matrix.unsqueeze(0).repeat(h, 1, 1)
projection = projection.unsqueeze(0).repeat(b, 1, 1, 1)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', data_normalizer * data,
projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = diag_data / 2.0 * data_normalizer ** 2
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash, dim=-1, keepdim=True).values) + eps)
else:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash)) + eps)
return data_dash.type_as(data)
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module,
type)]
def get_module_device(module):
return next(module.parameters()).device
class MultiheadAttention(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super(MultiheadAttention, self).__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim, d_model)
self.to_value = nn.Linear(v_dim, d_model)
self.to_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value):
batch, L = query.shape[:2]
q = self.to_query(query).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
k = self.to_key(key).view(batch, L, self.heads, self.d_k).permute(0,
2, 1, 3)
v = self.to_value(value).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
attention = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
out = torch.matmul(attention, v)
out = out.permute(0, 2, 1, 3).contiguous().view(batch, L, -1)
out = self.to_out(out)
return out
class FastAttention(nn.Module):
def __init__(self, dim_heads, nb_features=None, ortho_scaling=0,
generalized_attention=False, kernel_fn=nn.ReLU(inplace=True),
qr_uniform_q=False, no_projection=False):
super().__init__()
nb_features = default(nb_features, int(dim_heads * math.log(dim_heads))
)
self.dim_heads = dim_heads
self.nb_features = nb_features
self.ortho_scaling = ortho_scaling
if not no_projection:
self.create_projection = partial(gaussian_orthogonal_random_matrix,
nb_rows=self.nb_features, nb_columns=dim_heads, scaling=
ortho_scaling, qr_uniform_q=qr_uniform_q)
projection_matrix = self.create_projection()
self.register_buffer('projection_matrix', projection_matrix)
self.generalized_attention = generalized_attention
self.kernel_fn = kernel_fn
self.no_projection = no_projection
@torch.no_grad()
def redraw_projection_matrix(self, device):
projections = self.create_projection(device=device)
self.projection_matrix.copy_(projections)
del projections
def forward(self, q, k, v):
device = q.device
if self.no_projection:
q = q.softmax(dim=-1)
            k = k.softmax(dim=-2)  # assign the softmax back to k; the bare call was a no-op
elif self.generalized_attention:
create_kernel = partial(generalized_kernel, kernel_fn=self.
kernel_fn, projection_matrix=self.projection_matrix, device
=device)
q, k = map(create_kernel, (q, k))
else:
create_kernel = partial(softmax_kernel, projection_matrix=self.
projection_matrix, device=device)
q = create_kernel(q, is_query=True)
k = create_kernel(k, is_query=False)
attn_fn = linear_attention
out = attn_fn(q, k, v)
return out
class SelfAttention(nn.Module):
def __init__(self, dim, k_dim=None, heads=8, local_heads=0,
local_window_size=256, nb_features=None, feature_redraw_interval=
1000, generalized_attention=False, kernel_fn=nn.ReLU(inplace=True),
qr_uniform_q=False, dropout=0.0, no_projection=False):
super().__init__()
assert dim % heads == 0, 'dimension must be divisible by number of heads'
dim_head = dim // heads
inner_dim = dim_head * heads
if k_dim is None:
k_dim = dim
self.fast_attention = FastAttention(dim_head, nb_features,
generalized_attention=generalized_attention, kernel_fn=
kernel_fn, qr_uniform_q=qr_uniform_q, no_projection=no_projection)
self.heads = heads
self.dim = dim
self.to_query = nn.Linear(dim, inner_dim)
self.to_key = nn.Linear(k_dim, inner_dim)
self.to_value = nn.Linear(k_dim, inner_dim)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout, inplace=True)
self.feature_redraw_interval = feature_redraw_interval
self.register_buffer('calls_since_last_redraw', torch.tensor(0))
self.max_tokens = 2 ** 16
def check_redraw_projections(self):
if not self.training:
return
if exists(self.feature_redraw_interval
) and self.calls_since_last_redraw >= self.feature_redraw_interval:
device = get_module_device(self)
fast_attentions = find_modules(self, FastAttention)
for fast_attention in fast_attentions:
fast_attention.redraw_projection_matrix(device)
self.calls_since_last_redraw.zero_()
return
self.calls_since_last_redraw += 1
def _batched_forward(self, q, k, v):
b1, h, n1 = q.shape[:3]
out = torch.empty((b1, h, n1, self.dim // h), dtype=q.dtype, device
=q.device)
shift = self.max_tokens // n1
for i_b in range(0, b1, shift):
start = i_b
end = min(i_b + shift, b1)
out[start:end] = self.fast_attention(q[start:end], k[start:end],
v[start:end])
return out
def forward(self, query, key, value, **kwargs):
self.check_redraw_projections()
b1, n1, _, h = *query.shape, self.heads
b2, n2, _, h = *key.shape, self.heads
q = self.to_query(query)
k = self.to_key(key)
v = self.to_value(value)
q = q.reshape(b1, n1, h, -1).permute(0, 2, 1, 3)
k = k.reshape(b2, n2, h, -1).permute(0, 2, 1, 3)
v = v.reshape(b2, n2, h, -1).permute(0, 2, 1, 3)
if b1 * n1 > self.max_tokens or b2 * n2 > self.max_tokens:
out = self._batched_forward(q, k, v)
else:
out = self.fast_attention(q, k, v)
out = out.permute(0, 2, 1, 3).reshape(b1, n1, -1)
out = self.to_out(out)
return self.dropout(out)
class EncoderLayerNew(nn.Module):
def __init__(self, d_model, d_ff, heads, p_drop=0.1, performer_opts=None):
super(EncoderLayerNew, self).__init__()
self.use_performer = performer_opts is not None
if self.use_performer:
self.attn = SelfAttention(dim=d_model, heads=heads, dropout=
p_drop, nb_features=64, generalized_attention=True, **
performer_opts)
else:
self.attn = MultiheadAttention(d_model, heads, dropout=p_drop)
self.linear1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(p_drop)
self.linear2 = nn.Linear(d_ff, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(p_drop)
self.dropout2 = nn.Dropout(p_drop)
def forward(self, input_0):
primals_4 = self.attn.to_query.weight
primals_2 = self.attn.to_query.bias
primals_6 = self.attn.to_key.weight
primals_3 = self.attn.to_key.bias
primals_8 = self.attn.to_value.weight
primals_5 = self.attn.to_value.bias
primals_10 = self.attn.to_out.weight
primals_7 = self.attn.to_out.bias
primals_14 = self.linear1.weight
primals_9 = self.linear1.bias
primals_16 = self.linear2.weight
primals_11 = self.linear2.bias
primals_12 = self.norm1.weight
primals_13 = self.norm1.bias
primals_15 = self.norm2.weight
primals_17 = self.norm2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0]
 | wukevin/RoseTTAFold | EncoderLayer | false | 4,573 | ["MIT"] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def orthogonal_matrix_chunk(cols, qr_uniform_q=False, device=None):
unstructured_block = torch.randn((cols, cols), device=device)
q, r = torch.linalg.qr(unstructured_block.cpu(), 'reduced')
q, r = map(lambda t: t, (q, r))
if qr_uniform_q:
d = torch.diag(r, 0)
q *= d.sign()
return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0,
qr_uniform_q=False, device=None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim
=1)
elif scaling == 1:
multiplier = math.sqrt(float(nb_columns)) * torch.ones((nb_rows,),
device=device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
def generalized_kernel(data, *, projection_matrix, kernel_fn=nn.ReLU(
inplace=True), kernel_epsilon=0.001, normalize_data=True, device=None):
_b, _h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
data = data_normalizer * data
data = torch.matmul(data, projection_matrix.T)
data = kernel_fn(data) + kernel_epsilon
return data.type_as(data)
def linear_attention(q, k, v):
L = k.shape[-2]
D_inv = 1.0 / torch.einsum('...nd,...d->...n', q, k.mean(dim=-2))
context = torch.einsum('...nd,...ne->...de', k / float(L), v)
del k, v
out = torch.einsum('...n,...nd->...nd', D_inv, q)
del D_inv, q
out = torch.einsum('...nd,...de->...ne', out, context)
return out
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=
True, eps=0.0001, device=None):
b, h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
ratio = projection_matrix.shape[0] ** -0.5
projection = projection_matrix.unsqueeze(0).repeat(h, 1, 1)
projection = projection.unsqueeze(0).repeat(b, 1, 1, 1)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', data_normalizer * data,
projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = diag_data / 2.0 * data_normalizer ** 2
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash, dim=-1, keepdim=True).values) + eps)
else:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash)) + eps)
return data_dash.type_as(data)
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module,
type)]
def get_module_device(module):
return next(module.parameters()).device
class MultiheadAttention(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super().__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim,
# ... truncated (>4000 chars) for memory efficiency |
Analysis_prior_net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/hl/chlfnxlup6hobrajoyq4lywkcbacpsytxarxuteecr7a66ui7g3j.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 61440
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = (yindex // 320)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (320*x2) + (2880*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/y3/cy3oon3avepm7bu7eq6xt6f22uu327nh4y2pjl2vcq3qwqqhcnt3.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 36864
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = (yindex // 192)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (192*x2) + (4800*y1)), tmp0, xmask)
''', device_str='cuda')
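# NOTE (added illustration, not compiler output): triton_poi_fused_0 and
# triton_poi_fused_1 pre-permute the 3x3 and 5x5 conv weights from contiguous
# (O, I, kH, kW) into a channels-last layout, matching the activation layout
# produced by triton_poi_fused_abs_2 below. A PyTorch equivalent:
def _weight_to_channels_last_reference(w):
    return w.to(memory_format=torch.channels_last)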
# kernel path: runs/run_shard_7/inductor_cache/jc/cjcvqqqi5il5l27yjzqwzirz2vqs3djcnzdlyjuzsjiirxveahrh.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.abs]
# Source node to ATen node mapping:
# x => abs_1
# Graph fragment:
# %abs_1 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_abs_2 = async_compile.triton('triton_poi_fused_abs_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1280
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = (yindex // 320)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tmp1 = tl_math.abs(tmp0)
tl.store(out_ptr0 + (y0 + (320*x2) + (1310720*y1)), tmp1, ymask)
''', device_str='cuda')
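# NOTE (added illustration, not compiler output): the kernel above takes |x|
# element-wise and simultaneously repacks the 320-channel activation (batch 4,
# H*W = 4096 spatial positions) from NCHW strides into channels-last order so
# the following convolution reads channels contiguously. A PyTorch equivalent:
def _abs_channels_last_reference(x):
    return x.abs().to(memory_format=torch.channels_last)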
# kernel path: runs/run_shard_7/inductor_cache/no/cnowew42n3tkj4vl2ajg4ubv77b6csnfmhp4bxmexyvfyprwjp2k.py
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x_1 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%abs_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 3145728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
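# Eager-mode sketch of the fusion above: a per-channel bias add followed by
# LeakyReLU(negative_slope=0.01), also materializing the boolean mask that
# out_ptr0 stores for reuse in the backward pass. Illustration only; shapes
# and layout follow the channels-last buffers allocated in call() below.
def _bias_leaky_relu_reference(conv_out, bias):
    import torch
    y = conv_out + bias.view(1, -1, 1, 1)
    mask = y > 0
    return mask, torch.where(mask, y, y * 0.01)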
# kernel path: runs/run_shard_7/inductor_cache/dg/cdg4dhdpwk4grsvlnb56cmhua4iykveolouw6wd3dv5i2muvipo5.py
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_2 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 786432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lg/clgkmvsi2gcmjjc6sntuae6rll63yaui4qr3xhcllsv7mzzomagq.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 768
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 192
y1 = (yindex // 192)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (192*x2) + (49152*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (256*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
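# Sketch of this output epilogue: add the conv3 bias and copy the
# channels-last (4, 192, 16, 16) result back to standard contiguous NCHW
# strides (49152, 256, 16, 1) for the value call() returns. Illustration only.
def _bias_to_contiguous_reference(conv_out, bias):
    import torch
    return (conv_out + bias.view(1, -1, 1, 1)).contiguous()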
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 320, 64, 64), (1310720, 4096, 64, 1))
assert_size_stride(primals_2, (192, 320, 3, 3), (2880, 9, 3, 1))
assert_size_stride(primals_3, (192, ), (1, ))
assert_size_stride(primals_4, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_5, (192, ), (1, ))
assert_size_stride(primals_6, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_7, (192, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((192, 320, 3, 3), (2880, 1, 960, 320), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_2, buf0, 61440, 9, grid=grid(61440, 9), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_4, buf1, 36864, 25, grid=grid(36864, 25), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf2, 36864, 25, grid=grid(36864, 25), stream=stream0)
del primals_6
buf3 = empty_strided_cuda((4, 320, 64, 64), (1310720, 1, 20480, 320), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.abs]
triton_poi_fused_abs_2.run(primals_1, buf3, 1280, 4096, grid=grid(1280, 4096), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 192, 64, 64), (786432, 1, 12288, 192))
buf5 = empty_strided_cuda((4, 192, 64, 64), (786432, 1, 12288, 192), torch.bool)
buf6 = empty_strided_cuda((4, 192, 64, 64), (786432, 1, 12288, 192), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_3.run(buf4, primals_3, buf5, buf6, 3145728, grid=grid(3145728), stream=stream0)
del buf4
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, buf1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 192, 32, 32), (196608, 1, 6144, 192))
buf8 = empty_strided_cuda((4, 192, 32, 32), (196608, 1, 6144, 192), torch.bool)
buf9 = empty_strided_cuda((4, 192, 32, 32), (196608, 1, 6144, 192), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf7, primals_5, buf8, buf9, 786432, grid=grid(786432), stream=stream0)
del buf7
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, buf2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 192, 16, 16), (49152, 1, 3072, 192))
buf11 = empty_strided_cuda((4, 192, 16, 16), (49152, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf10, primals_7, buf11, 768, 256, grid=grid(768, 256), stream=stream0)
del buf10
del primals_7
return (buf11, buf0, buf1, buf2, buf3, buf5, buf6, buf8, buf9, )
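# End-to-end eager equivalent of call() above, assuming float32 CUDA tensors
# with the asserted shapes: abs -> conv1(3x3, s1) -> LeakyReLU -> conv2(5x5,
# s2) -> LeakyReLU -> conv3(5x5, s2); spatial size goes 64 -> 64 -> 32 -> 16.
# A minimal sketch for checking the lowering, not part of the compiled path.
def _call_reference(x, w1, b1, w2, b2, w3, b3):
    import torch
    import torch.nn.functional as F
    h = F.leaky_relu(F.conv2d(torch.abs(x), w1, b1, stride=1, padding=1), 0.01)
    h = F.leaky_relu(F.conv2d(h, w2, b2, stride=2, padding=2), 0.01)
    return F.conv2d(h, w3, b3, stride=2, padding=2)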
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 320, 64, 64), (1310720, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((192, 320, 3, 3), (2880, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((192, 192, 5, 5), (4800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((192, 192, 5, 5), (4800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((192, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.utils.data
class Analysis_prior_net(nn.Module):
"""
    Analysis prior net: hyperprior analysis transform that takes the absolute
    value of the input and applies one stride-1 and two stride-2 convolutions,
    reducing spatial resolution by 4x.
"""
def __init__(self, out_channel_N=192, out_channel_M=320):
super(Analysis_prior_net, self).__init__()
self.conv1 = nn.Conv2d(out_channel_M, out_channel_N, 3, stride=1,
padding=1)
torch.nn.init.xavier_normal_(self.conv1.weight.data, math.sqrt(2 *
(out_channel_M + out_channel_N) / (out_channel_M + out_channel_M)))
torch.nn.init.constant_(self.conv1.bias.data, 0.01)
self.relu1 = nn.LeakyReLU()
self.conv2 = nn.Conv2d(out_channel_N, out_channel_N, 5, stride=2,
padding=2)
torch.nn.init.xavier_normal_(self.conv2.weight.data, math.sqrt(2))
torch.nn.init.constant_(self.conv2.bias.data, 0.01)
self.relu2 = nn.LeakyReLU()
self.conv3 = nn.Conv2d(out_channel_N, out_channel_N, 5, stride=2,
padding=2)
torch.nn.init.xavier_normal_(self.conv3.weight.data, math.sqrt(2))
torch.nn.init.constant_(self.conv3.bias.data, 0.01)
def forward(self, x):
x = torch.abs(x)
x = self.relu1(self.conv1(x))
x = self.relu2(self.conv2(x))
return self.conv3(x)
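# Shape trace for the default configuration (out_channel_M=320,
# out_channel_N=192) on the get_inputs() tensor below: (4, 320, 64, 64) ->
# conv1 -> (4, 192, 64, 64) -> conv2 (stride 2) -> (4, 192, 32, 32) -> conv3
# (stride 2) -> (4, 192, 16, 16). A small demo; assumes CPU execution is fine.
def _shape_trace_demo():
    import torch
    net = Analysis_prior_net()
    z = net(torch.rand(4, 320, 64, 64))
    return z.shape  # torch.Size([4, 192, 16, 16])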
def get_inputs():
return [torch.rand([4, 320, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = yindex // 320
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 320 * x2 + 2880 * y1), tmp0, xmask)
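# Sketch of what triton_poi_fused_0 (and triton_poi_fused_1 below) do: repack
# the OIHW convolution weights into channels-last layout, e.g. (192, 320, 3, 3)
# with strides (2880, 1, 960, 320), to match the channels-last activations.
# Illustration only, assuming torch's channels_last memory format.
def _weight_to_channels_last(w):
    import torch
    return w.contiguous(memory_format=torch.channels_last)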
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 192
y1 = yindex // 192
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 192 * x2 + 4800 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_abs_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 1280
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 320
y1 = yindex // 320
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl_math.abs(tmp0)
tl.store(out_ptr0 + (y0 + 320 * x2 + 1310720 * y1), tmp1, ymask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 192
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 768
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 192
y1 = yindex // 192
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 192 * x2 + 49152 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 256 * y3), tmp2, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 320, 64, 64), (1310720, 4096, 64, 1))
assert_size_stride(primals_2, (192, 320, 3, 3), (2880, 9, 3, 1))
assert_size_stride(primals_3, (192,), (1,))
assert_size_stride(primals_4, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_5, (192,), (1,))
assert_size_stride(primals_6, (192, 192, 5, 5), (4800, 25, 5, 1))
assert_size_stride(primals_7, (192,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((192, 320, 3, 3), (2880, 1, 960, 320),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(61440, 9)](primals_2, buf0, 61440, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192),
torch.float32)
triton_poi_fused_1[grid(36864, 25)](primals_4, buf1, 36864, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((192, 192, 5, 5), (4800, 1, 960, 192),
torch.float32)
triton_poi_fused_1[grid(36864, 25)](primals_6, buf2, 36864, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
buf3 = empty_strided_cuda((4, 320, 64, 64), (1310720, 1, 20480, 320
), torch.float32)
triton_poi_fused_abs_2[grid(1280, 4096)](primals_1, buf3, 1280,
4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf4 = extern_kernels.convolution(buf3, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 192, 64, 64), (786432, 1, 12288, 192))
buf5 = empty_strided_cuda((4, 192, 64, 64), (786432, 1, 12288, 192),
torch.bool)
buf6 = empty_strided_cuda((4, 192, 64, 64), (786432, 1, 12288, 192),
torch.float32)
triton_poi_fused_convolution_leaky_relu_3[grid(3145728)](buf4,
primals_3, buf5, buf6, 3145728, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf4
del primals_3
buf7 = extern_kernels.convolution(buf6, buf1, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 192, 32, 32), (196608, 1, 6144, 192))
buf8 = empty_strided_cuda((4, 192, 32, 32), (196608, 1, 6144, 192),
torch.bool)
buf9 = empty_strided_cuda((4, 192, 32, 32), (196608, 1, 6144, 192),
torch.float32)
triton_poi_fused_convolution_leaky_relu_4[grid(786432)](buf7,
primals_5, buf8, buf9, 786432, XBLOCK=512, num_warps=8,
num_stages=1)
del buf7
del primals_5
buf10 = extern_kernels.convolution(buf9, buf2, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 192, 16, 16), (49152, 1, 3072, 192))
buf11 = empty_strided_cuda((4, 192, 16, 16), (49152, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_5[grid(768, 256)](buf10, primals_7,
buf11, 768, 256, XBLOCK=16, YBLOCK=256, num_warps=8, num_stages=1)
del buf10
del primals_7
return buf11, buf0, buf1, buf2, buf3, buf5, buf6, buf8, buf9
class Analysis_prior_netNew(nn.Module):
"""
    Analysis prior net: hyperprior analysis transform that takes the absolute
    value of the input and applies one stride-1 and two stride-2 convolutions,
    reducing spatial resolution by 4x.
"""
def __init__(self, out_channel_N=192, out_channel_M=320):
super(Analysis_prior_netNew, self).__init__()
self.conv1 = nn.Conv2d(out_channel_M, out_channel_N, 3, stride=1,
padding=1)
torch.nn.init.xavier_normal_(self.conv1.weight.data, math.sqrt(2 *
(out_channel_M + out_channel_N) / (out_channel_M + out_channel_M)))
torch.nn.init.constant_(self.conv1.bias.data, 0.01)
self.relu1 = nn.LeakyReLU()
self.conv2 = nn.Conv2d(out_channel_N, out_channel_N, 5, stride=2,
padding=2)
torch.nn.init.xavier_normal_(self.conv2.weight.data, math.sqrt(2))
torch.nn.init.constant_(self.conv2.bias.data, 0.01)
self.relu2 = nn.LeakyReLU()
self.conv3 = nn.Conv2d(out_channel_N, out_channel_N, 5, stride=2,
padding=2)
torch.nn.init.xavier_normal_(self.conv3.weight.data, math.sqrt(2))
torch.nn.init.constant_(self.conv3.bias.data, 0.01)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
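# A quick parity sketch (hypothetical check; assumes a CUDA device and an
# instance of the eager Analysis_prior_net from the reference implementation
# above): the compiled module should reproduce the eager output up to float
# tolerance.
def _parity_check(eager_net, x):
    import torch
    compiled = Analysis_prior_netNew().cuda()
    compiled.load_state_dict(eager_net.state_dict())
    return torch.allclose(eager_net(x), compiled(x), atol=1e-4)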
| wemozj/Image-Compression-based-GMM-and-Attention-Module | Analysis_prior_net | false | 4,574 | [
"Apache-2.0"
] | 0 | 93f804dbcea8ffc1621456f3d104d0342c75373b | https://github.com/wemozj/Image-Compression-based-GMM-and-Attention-Module/tree/93f804dbcea8ffc1621456f3d104d0342c75373b | import math
import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""
    Analysis prior net: hyperprior analysis transform that takes the absolute
    value of the input and applies one stride-1 and two stride-2 convolutions,
    reducing spatial resolution by 4x.
"""
def __init__(self, out_channel_N=192, out_channel_M=320):
super().__init__()
self.conv1 = nn.Conv2d(out_channel_M, out_channel_N, 3, stride=1,
padding=1)
torch.nn.init.xavier_normal_(self.conv1.weight.data, math.sqrt(2 *
(out_channel_M + out_channel_N) / (out_channel_M + out_channel_M)))
torch.nn.init.constant_(self.conv1.bias.data, 0.01)
self.relu1 = nn.LeakyReLU()
self.conv2 = nn.Conv2d(out_channel_N, out_channel_N, 5, stride=2,
padding=2)
torch.nn.init.xavier_normal_(self.conv2.weight.data, math.sqrt(2))
torch.nn.init.constant_(self.conv2.bias.data, 0.01)
self.relu2 = nn.LeakyReLU()
self.conv3 = nn.Conv2d(out_channel_N, out_channel_N, 5, stride=2,
padding=2)
torch.nn.init.xavier_normal_(self.conv3.weight.data, math.sqrt(2))
torch.nn.init.constant_(self.conv3.bias.data, 0.01)
def forward(self, x):
x = torch.abs(x)
x = self.relu1(self.conv1(x))
x = self.relu2(self.conv2(x))
return self.conv3(x)
def get_inputs():
return [torch.rand([4, 320, 64, 64])]
def get_init_inputs():
return []
|
MatchRNNAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/bm/cbm6tfyfbvibw2xrkq2kd7b6hr2lb7vsiarwrnwzu3dpny6acnhb.py
# Topologically Sorted Source Nodes: [add, add_1, G], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# G => tanh
# add => add
# add_1 => add_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %unsqueeze), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %unsqueeze_1), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x2), xmask)
tmp8 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp11 = libdevice.tanh(tmp10)
tl.store(in_out_ptr0 + (x2), tmp11, xmask)
''', device_str='cuda')
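# Eager sketch of the fusion above: the three bias-added linear projections
# are summed and passed through tanh in a single elementwise pass, i.e.
# G = tanh((Wq*Hq + bq) + (Wp*Hp + bp) + (Wr*Hr + br)). Illustration only.
def _add_tanh_reference(wq_hq, bq, wp_hp, bp, wr_hr, br):
    import torch
    return torch.tanh((wq_hq + bq) + (wp_hp + bp) + (wr_hr + br))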
# kernel path: runs/run_shard_7/inductor_cache/mc/cmczcqleqvynpeitxpozsz7si5a5pzlhofcqqm4cfrnysouzl3tw.py
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1, add_2], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum, aten.add]
# Source node to ATen node mapping:
# add_2 => add_2
# e_x => exp
# e_x_1 => mul_1
# max_1 => max_1
# sub => sub
# sum_1 => sum_1
# x => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_4, %primals_12), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 1, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %primals_12), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1], True), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
triton_poi_fused_add_exp_max_mul_sub_sum_1 = async_compile.triton('triton_poi_fused_add_exp_max_mul_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_max_mul_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_max_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x4 = xindex % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (64 + x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (128 + x4), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (192 + x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp0 * tmp6
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp0 * tmp9
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp13 * tmp1
tmp15 = tmp4 - tmp11
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 * tmp3
tmp18 = tmp14 + tmp17
tmp19 = tmp7 - tmp11
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp20 * tmp6
tmp22 = tmp18 + tmp21
tmp23 = tmp10 - tmp11
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp24 * tmp9
tmp26 = tmp22 + tmp25
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tl.store(out_ptr0 + (x5), tmp11, xmask)
tl.store(out_ptr1 + (x5), tmp28, xmask)
''', device_str='cuda')
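# Eager sketch: per softmax row of length 4, this kernel precomputes the max
# of x*m (for numerical stability) and the masked denominator
# sum(exp(x*m - max) * m) + 1e-06 that the next kernel divides by.
def _masked_softmax_stats(x, m):
    import torch
    xm = x * m
    mx = xm.max(dim=1, keepdim=True).values
    denom = (torch.exp(xm - mx) * m).sum(dim=1, keepdim=True) + 1e-06
    return mx, denom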
# kernel path: runs/run_shard_7/inductor_cache/jz/cjz2ly2sfrbducoq4qen3topgalw5xptx3mrfsbmnrixwpb75wx3.py
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1, add_2, softmax], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum, aten.add, aten.div]
# Source node to ATen node mapping:
# add_2 => add_2
# e_x => exp
# e_x_1 => mul_1
# max_1 => max_1
# softmax => div
# sub => sub
# sum_1 => sum_1
# x => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_4, %primals_12), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 1, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %primals_12), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1], True), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_2), kwargs = {})
triton_poi_fused_add_div_exp_max_mul_sub_sum_2 = async_compile.triton('triton_poi_fused_add_div_exp_max_mul_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_max_mul_sub_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_exp_max_mul_sub_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 16
x3 = (xindex // 256)
x4 = xindex % 256
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (16*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp1
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x6), tmp8, xmask)
''', device_str='cuda')
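# Eager sketch of the final step: recompute exp(x*m - max) * m elementwise and
# divide by the precomputed denominator to obtain the masked softmax weights.
def _masked_softmax_finalize(x, m, mx, denom):
    import torch
    return torch.exp(x * m - mx) * m / denom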
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (1, 4), (4, 1))
assert_size_stride(primals_11, (1, ), (1, ))
assert_size_stride(primals_12, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, add_1, G], Original ATen: [aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_tanh_0.run(buf3, primals_2, buf1, primals_5, buf2, primals_8, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_5
del primals_8
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_11
buf6 = reinterpret_tensor(buf2, (4, 1, 4, 4, 4), (64, 256, 16, 4, 1), 0); del buf2 # reuse
buf7 = reinterpret_tensor(buf1, (4, 1, 4, 4, 4), (64, 256, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1, add_2], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum, aten.add]
triton_poi_fused_add_exp_max_mul_sub_sum_1.run(buf5, primals_12, buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1, add_2, softmax], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum, aten.add, aten.div]
triton_poi_fused_add_div_exp_max_mul_sub_sum_2.run(buf5, primals_12, buf6, buf7, buf8, 1024, grid=grid(1024), stream=stream0)
del buf6
del buf7
return (buf8, primals_12, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf3, buf5, primals_10, )
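# Sketch of the extern calls above: each nn.Linear is lowered to a matmul over
# the input flattened to (64, 4), and linear_wg becomes a single addmm that
# produces the (64, 1) attention logits. A minimal reference, assuming the
# usual nn.Linear convention y = x @ W.T + b.
def _linear_as_mm(x4d, weight, bias):
    import torch
    flat = x4d.reshape(-1, x4d.shape[-1])       # e.g. (4, 4, 4, 4) -> (64, 4)
    return torch.addmm(bias, flat, weight.t())  # same result as nn.Linear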
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn.functional as F
def masked_softmax(x, m=None, dim=-1):
"""
    Softmax with mask
    :param x: input score tensor
    :param m: optional mask tensor, broadcastable to x (1 keeps a position, 0 masks it out)
    :param dim: dimension along which to normalize
    :return: masked softmax of x along dim
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
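# Tiny usage sketch for masked_softmax: masked positions receive zero weight
# and the remaining weights sum to ~1 (up to the 1e-06 epsilon).
def _masked_softmax_demo():
    import torch
    x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
    m = torch.tensor([[1.0, 1.0, 0.0, 1.0]])
    p = masked_softmax(x, m, dim=1)
    return p  # p[0, 2] == 0.0 and p.sum(dim=1) is approximately 1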
class MatchRNNAttention(torch.nn.Module):
"""
attention mechanism in match-rnn
Args:
        - hp_input_size: The number of expected features in the context input Hp
        - hq_input_size: The number of expected features in the question input Hq
        - hidden_size: The number of features in the hidden state Hr
    Inputs:
        Hpi(batch, input_size): encoding of one context word
        Hq(question_len, batch, input_size): encoding of the whole question
        Hr_last(batch, hidden_size): last LSTM hidden state
Outputs:
alpha(batch, question_len): attention vector
"""
def __init__(self, hp_input_size, hq_input_size, hidden_size):
super(MatchRNNAttention, self).__init__()
self.linear_wq = torch.nn.Linear(hq_input_size, hidden_size)
self.linear_wp = torch.nn.Linear(hp_input_size, hidden_size)
self.linear_wr = torch.nn.Linear(hidden_size, hidden_size)
self.linear_wg = torch.nn.Linear(hidden_size, 1)
def forward(self, Hpi, Hq, Hr_last, Hq_mask):
wq_hq = self.linear_wq(Hq)
wp_hp = self.linear_wp(Hpi).unsqueeze(0)
wr_hr = self.linear_wr(Hr_last).unsqueeze(0)
G = F.tanh(wq_hq + wp_hp + wr_hr)
wg_g = self.linear_wg(G).squeeze(2).transpose(0, 1)
alpha = masked_softmax(wg_g, m=Hq_mask, dim=1)
return alpha
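# Minimal usage sketch with hypothetical sizes matching the documented shapes:
# one attention step over a toy question of length 5, batch 2, feature size 4.
def _match_attention_demo():
    import torch
    att = MatchRNNAttention(hp_input_size=4, hq_input_size=4, hidden_size=4)
    Hpi = torch.rand(2, 4)      # one context word, (batch, input_size)
    Hq = torch.rand(5, 2, 4)    # (question_len, batch, input_size)
    Hr_last = torch.rand(2, 4)  # (batch, hidden_size)
    Hq_mask = torch.ones(2, 5)  # (batch, question_len)
    return att(Hpi, Hq, Hr_last, Hq_mask)  # (batch, question_len) weights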
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hp_input_size': 4, 'hq_input_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x2, xmask)
tmp8 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp11 = libdevice.tanh(tmp10)
tl.store(in_out_ptr0 + x2, tmp11, xmask)
@triton.jit
def triton_poi_fused_add_exp_max_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (64 + x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (128 + x4), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (192 + x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp0 * tmp6
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp0 * tmp9
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp13 * tmp1
tmp15 = tmp4 - tmp11
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 * tmp3
tmp18 = tmp14 + tmp17
tmp19 = tmp7 - tmp11
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp20 * tmp6
tmp22 = tmp18 + tmp21
tmp23 = tmp10 - tmp11
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp24 * tmp9
tmp26 = tmp22 + tmp25
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tl.store(out_ptr0 + x5, tmp11, xmask)
tl.store(out_ptr1 + x5, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_div_exp_max_mul_sub_sum_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 16
x3 = xindex // 256
x4 = xindex % 256
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr3 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp1
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x6, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (1, 4), (4, 1))
assert_size_stride(primals_11, (1,), (1,))
assert_size_stride(primals_12, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf0
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(256)](buf3, primals_2, buf1,
primals_5, buf2, primals_8, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_5
del primals_8
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf3, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_11
buf6 = reinterpret_tensor(buf2, (4, 1, 4, 4, 4), (64, 256, 16, 4, 1), 0
)
del buf2
buf7 = reinterpret_tensor(buf1, (4, 1, 4, 4, 4), (64, 256, 16, 4, 1), 0
)
del buf1
triton_poi_fused_add_exp_max_mul_sub_sum_1[grid(256)](buf5,
primals_12, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_add_div_exp_max_mul_sub_sum_2[grid(1024)](buf5,
primals_12, buf6, buf7, buf8, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
del buf6
del buf7
return buf8, primals_12, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf3, buf5, primals_10
def masked_softmax(x, m=None, dim=-1):
"""
    Softmax with mask
    :param x: input score tensor
    :param m: optional mask tensor, broadcastable to x (1 keeps a position, 0 masks it out)
    :param dim: dimension along which to normalize
    :return: masked softmax of x along dim
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class MatchRNNAttentionNew(torch.nn.Module):
"""
attention mechanism in match-rnn
Args:
        - hp_input_size: The number of expected features in the context input Hp
        - hq_input_size: The number of expected features in the question input Hq
        - hidden_size: The number of features in the hidden state Hr
    Inputs:
        Hpi(batch, input_size): encoding of one context word
        Hq(question_len, batch, input_size): encoding of the whole question
        Hr_last(batch, hidden_size): last LSTM hidden state
Outputs:
alpha(batch, question_len): attention vector
"""
def __init__(self, hp_input_size, hq_input_size, hidden_size):
super(MatchRNNAttentionNew, self).__init__()
self.linear_wq = torch.nn.Linear(hq_input_size, hidden_size)
self.linear_wp = torch.nn.Linear(hp_input_size, hidden_size)
self.linear_wr = torch.nn.Linear(hidden_size, hidden_size)
self.linear_wg = torch.nn.Linear(hidden_size, 1)
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.linear_wq.weight
primals_2 = self.linear_wq.bias
primals_4 = self.linear_wp.weight
primals_5 = self.linear_wp.bias
primals_7 = self.linear_wr.weight
primals_8 = self.linear_wr.bias
primals_10 = self.linear_wg.weight
primals_11 = self.linear_wg.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
primals_12 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| xdong73S/Match_LSTM_v2.0 | MatchRNNAttention | false | 4,575 | [
"MIT"
] | 0 | dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | https://github.com/xdong73S/Match_LSTM_v2.0/tree/dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | import torch
import torch.utils.data
import torch.nn.functional as F
def masked_softmax(x, m=None, dim=-1):
"""
    Softmax with mask
    :param x: input score tensor
    :param m: optional mask tensor, broadcastable to x (1 keeps a position, 0 masks it out)
    :param dim: dimension along which to normalize
    :return: masked softmax of x along dim
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class Model(torch.nn.Module):
"""
attention mechanism in match-rnn
Args:
        - hp_input_size: The number of expected features in the context input Hp
        - hq_input_size: The number of expected features in the question input Hq
        - hidden_size: The number of features in the hidden state Hr
    Inputs:
        Hpi(batch, input_size): encoding of one context word
        Hq(question_len, batch, input_size): encoding of the whole question
        Hr_last(batch, hidden_size): last LSTM hidden state
Outputs:
alpha(batch, question_len): attention vector
"""
def __init__(self, hp_input_size, hq_input_size, hidden_size):
super().__init__()
self.linear_wq = torch.nn.Linear(hq_input_size, hidden_size)
self.linear_wp = torch.nn.Linear(hp_input_size, hidden_size)
self.linear_wr = torch.nn.Linear(hidden_size, hidden_size)
self.linear_wg = torch.nn.Linear(hidden_size, 1)
def forward(self, Hpi, Hq, Hr_last, Hq_mask):
wq_hq = self.linear_wq(Hq)
wp_hp = self.linear_wp(Hpi).unsqueeze(0)
wr_hr = self.linear_wr(Hr_last).unsqueeze(0)
G = F.tanh(wq_hq + wp_hp + wr_hr)
wg_g = self.linear_wg(G).squeeze(2).transpose(0, 1)
alpha = masked_softmax(wg_g, m=Hq_mask, dim=1)
return alpha
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
AttentionPooling | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/vu/cvuv73ijvl45rys2kvk2dud7shg4nznn622fzyldpf7pmxppx3o5.py
# Topologically Sorted Source Nodes: [q_tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# q_tanh => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/34/c34c33icw4ddtko6e42ipvw7crvbsbhere7wy26bixycn2y3uzsy.py
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum]
# Source node to ATen node mapping:
# e_x => exp
# e_x_1 => mul_1
# max_1 => max_1
# sub => sub
# sum_1 => sum_1
# x => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, %primals_6), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 1, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %primals_6), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1], True), kwargs = {})
triton_poi_fused_exp_max_mul_sub_sum_1 = async_compile.triton('triton_poi_fused_exp_max_mul_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_max_mul_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_max_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
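# One lane per batch row x0: forms x = score * mask over the four sequence positions
# (in_ptr0 is laid out (seq_len, batch), in_ptr1 (batch, seq_len)), then writes the
# row max to out_ptr0 and the masked exp-sum (the softmax denominator) to out_ptr1.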
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 * tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 * tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 * tmp1
tmp18 = tmp5 - tmp14
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 * tmp4
tmp21 = tmp17 + tmp20
tmp22 = tmp9 - tmp14
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 * tmp8
tmp25 = tmp21 + tmp24
tmp26 = tmp13 - tmp14
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp27 * tmp12
tmp29 = tmp25 + tmp28
tl.store(out_ptr0 + (x0), tmp14, xmask)
tl.store(out_ptr1 + (x0), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ke/ckesc3qjdwwnmydaabmb4bbxskdz2mktvenabshozsdj6wdn33ae.py
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, add, softmax], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# e_x => exp
# e_x_1 => mul_1
# max_1 => max_1
# softmax => div
# sub => sub
# x => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, %primals_6), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 1, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %primals_6), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add), kwargs = {})
triton_poi_fused_add_div_exp_max_mul_sub_2 = async_compile.triton('triton_poi_fused_add_div_exp_max_mul_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_max_mul_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_exp_max_mul_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
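# Finishes the masked softmax for (batch y0, position x1): recomputes x = score * mask,
# subtracts the row max (in_ptr2), exponentiates, re-applies the mask, and divides by
# the precomputed masked exp-sum (in_ptr3) plus the 1e-06 epsilon.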
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (x1 + (4*y0)), xmask & ymask)
tmp3 = tl.load(in_ptr2 + (y0), ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp1
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = tmp6 / tmp9
tl.store(out_ptr0 + (x1 + (4*y0)), tmp10, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/h6/ch6bafozqfkjjkpo4c4dyiba65qeun72gg5z3pz456zdtwhj66ke.py
# Topologically Sorted Source Nodes: [rq_o], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# rq_o => tanh_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_tanh_3 = async_compile.triton('triton_poi_fused_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [q_tanh], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum]
triton_poi_fused_exp_max_mul_sub_sum_1.run(buf3, primals_6, buf4, buf5, 4, grid=grid(4), stream=stream0)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, add, softmax], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.add, aten.div]
triton_poi_fused_add_div_exp_max_mul_sub_2.run(buf3, primals_6, buf4, buf5, buf6, 4, 4, grid=grid(4, 4), stream=stream0)
del buf4
del buf5
buf7 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 4), (4, 0, 1), 0), reinterpret_tensor(primals_3, (4, 4, 4), (4, 16, 1), 0), out=buf7)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (4, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [rq_o], Original ATen: [aten.tanh]
triton_poi_fused_tanh_3.run(buf9, primals_8, 16, grid=grid(16), stream=stream0)
del primals_8
return (buf9, primals_3, primals_6, buf1, buf3, reinterpret_tensor(buf7, (4, 4), (4, 1), 0), buf9, primals_7, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn.functional as F
def masked_softmax(x, m=None, dim=-1):
"""
Softmax with mask
:param x:
:param m:
:param dim:
:return:
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
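# Illustrative usage (not part of the original source): masked positions get ~0 probability.
#   scores = torch.randn(2, 3)
#   mask = torch.tensor([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
#   probs = masked_softmax(scores, mask, dim=1)  # each row sums to ~1 over unmasked entries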
class AttentionPooling(torch.nn.Module):
"""
Attention pooling for generating the pointer network's initial hidden state.
Equivalent to self-attention followed by an MLP.
Modified from R-Net.
Args:
input_size: The number of expected features in the input uq
output_size: The number of expected features in the output rq_o
Inputs: input, mask
- **input** (seq_len, batch, input_size): tensor containing the features
of the input sequence.
- **mask** (batch, seq_len): tensor indicating which positions in each batch element are padding.
Outputs: output
- **output** (batch, output_size): tensor containing the output features
"""
def __init__(self, input_size, output_size):
super(AttentionPooling, self).__init__()
self.linear_u = torch.nn.Linear(input_size, output_size)
self.linear_t = torch.nn.Linear(output_size, 1)
self.linear_o = torch.nn.Linear(input_size, output_size)
def forward(self, uq, mask):
q_tanh = F.tanh(self.linear_u(uq))
q_s = self.linear_t(q_tanh).squeeze(2).transpose(0, 1)
alpha = masked_softmax(q_s, mask, dim=1)
rq = torch.bmm(alpha.unsqueeze(1), uq.transpose(0, 1)).squeeze(1)
rq_o = F.tanh(self.linear_o(rq))
return rq_o
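# Shape trace for the forward above (illustrative): uq (seq_len, batch, input_size),
# mask (batch, seq_len) -> q_s and alpha (batch, seq_len), rq (batch, input_size),
# rq_o (batch, output_size).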
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_exp_max_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 * tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 * tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 * tmp1
tmp18 = tmp5 - tmp14
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 * tmp4
tmp21 = tmp17 + tmp20
tmp22 = tmp9 - tmp14
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 * tmp8
tmp25 = tmp21 + tmp24
tmp26 = tmp13 - tmp14
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp27 * tmp12
tmp29 = tmp25 + tmp28
tl.store(out_ptr0 + x0, tmp14, xmask)
tl.store(out_ptr1 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused_add_div_exp_max_mul_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (x1 + 4 * y0), xmask & ymask)
tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp1
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = tmp6 / tmp9
tl.store(out_ptr0 + (x1 + 4 * y0), tmp10, xmask & ymask)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(64)](buf1, primals_2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_exp_max_mul_sub_sum_1[grid(4)](buf3, primals_6,
buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_div_exp_max_mul_sub_2[grid(4, 4)](buf3,
primals_6, buf4, buf5, buf6, 4, 4, XBLOCK=4, YBLOCK=4,
num_warps=1, num_stages=1)
del buf4
del buf5
buf7 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 4), (4, 0, 1), 0), reinterpret_tensor(primals_3, (4, 4, 4), (4, 16, 1), 0), out=buf7)
buf8 = buf6
del buf6
extern_kernels.mm(reinterpret_tensor(buf7, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_tanh_3[grid(16)](buf9, primals_8, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_8
return buf9, primals_3, primals_6, buf1, buf3, reinterpret_tensor(buf7,
(4, 4), (4, 1), 0), buf9, primals_7, primals_4
def masked_softmax(x, m=None, dim=-1):
"""
Softmax with mask
:param x:
:param m:
:param dim:
:return:
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class AttentionPoolingNew(torch.nn.Module):
"""
Attention pooling for generating the pointer network's initial hidden state.
Equivalent to self-attention followed by an MLP.
Modified from R-Net.
Args:
input_size: The number of expected features in the input uq
output_size: The number of expected features in the output rq_o
Inputs: input, mask
- **input** (seq_len, batch, input_size): tensor containing the features
of the input sequence.
- **mask** (batch, seq_len): tensor indicating which positions in each batch element are padding.
Outputs: output
- **output** (batch, output_size): tensor containing the output features
"""
def __init__(self, input_size, output_size):
super(AttentionPoolingNew, self).__init__()
self.linear_u = torch.nn.Linear(input_size, output_size)
self.linear_t = torch.nn.Linear(output_size, 1)
self.linear_o = torch.nn.Linear(input_size, output_size)
def forward(self, input_0, input_1):
primals_1 = self.linear_u.weight
primals_2 = self.linear_u.bias
primals_4 = self.linear_t.weight
primals_5 = self.linear_t.bias
primals_6 = self.linear_o.weight
primals_8 = self.linear_o.bias
primals_3 = input_0
primals_7 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| xdong73S/Match_LSTM_v2.0 | AttentionPooling | false | 4,576 | [
"MIT"
] | 0 | dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | https://github.com/xdong73S/Match_LSTM_v2.0/tree/dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | import torch
import torch.utils.data
import torch.nn.functional as F
def masked_softmax(x, m=None, dim=-1):
"""
Softmax with mask
:param x:
:param m:
:param dim:
:return:
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class Model(torch.nn.Module):
"""
Attention pooling for generating the pointer network's initial hidden state.
Equivalent to self-attention followed by an MLP.
Modified from R-Net.
Args:
input_size: The number of expected features in the input uq
output_size: The number of expected features in the output rq_o
Inputs: input, mask
- **input** (seq_len, batch, input_size): tensor containing the features
of the input sequence.
- **mask** (batch, seq_len): tensor indicating which positions in each batch element are padding.
Outputs: output
- **output** (batch, output_size): tensor containing the output features
"""
def __init__(self, input_size, output_size):
super().__init__()
self.linear_u = torch.nn.Linear(input_size, output_size)
self.linear_t = torch.nn.Linear(output_size, 1)
self.linear_o = torch.nn.Linear(input_size, output_size)
def forward(self, uq, mask):
q_tanh = F.tanh(self.linear_u(uq))
q_s = self.linear_t(q_tanh).squeeze(2).transpose(0, 1)
alpha = masked_softmax(q_s, mask, dim=1)
rq = torch.bmm(alpha.unsqueeze(1), uq.transpose(0, 1)).squeeze(1)
rq_o = F.tanh(self.linear_o(rq))
return rq_o
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
_MixPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/kn/ckna4lh5sisil3h3mspvym32oaxgycsnrk2xb7lkcz6qg3pah2gc.py
# Topologically Sorted Source Nodes: [max_pool2d, avg_pool2d, add], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d, aten.add]
# Source node to ATen node mapping:
# add => add
# avg_pool2d => avg_pool2d
# max_pool2d => _low_memory_max_pool2d_with_offsets
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [4, 4], [1, 1], [0, 0], [1, 1], False), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4], [1, 1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %avg_pool2d), kwargs = {})
triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
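# One lane per (batch, channel) pair: the 16 loads below cover the whole 4x4 window;
# the kernel reduces them with a running max (max-pool branch) and a running sum
# scaled by 1/16 = 0.0625 (avg-pool branch), then stores their sum in place.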
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp31 = tmp1 + tmp0
tmp32 = tmp3 + tmp31
tmp33 = tmp5 + tmp32
tmp34 = tmp7 + tmp33
tmp35 = tmp9 + tmp34
tmp36 = tmp11 + tmp35
tmp37 = tmp13 + tmp36
tmp38 = tmp15 + tmp37
tmp39 = tmp17 + tmp38
tmp40 = tmp19 + tmp39
tmp41 = tmp21 + tmp40
tmp42 = tmp23 + tmp41
tmp43 = tmp25 + tmp42
tmp44 = tmp27 + tmp43
tmp45 = tmp29 + tmp44
tmp46 = 0.0625
tmp47 = tmp45 * tmp46
tmp48 = tmp30 + tmp47
tl.store(in_out_ptr0 + (x0), tmp48, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [max_pool2d, avg_pool2d, add], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0.run(buf2, arg0_1, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class _MixPool2d(torch.nn.Module):
def __init__(self, kernel_size, stride, padding=0, ceil_mode=False):
super(_MixPool2d, self).__init__()
self.max_pool = torch.nn.MaxPool2d(kernel_size, stride, padding,
ceil_mode=ceil_mode)
self.avg_pool = torch.nn.AvgPool2d(kernel_size, stride, padding,
ceil_mode=ceil_mode)
def forward(self, input):
return self.max_pool(input) + self.avg_pool(input)
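# Output-size check (illustrative): with kernel 4, stride 1, padding 0, a
# (N, C, 4, 4) input gives floor((4 - 4) / 1) + 1 = 1, i.e. a (N, C, 1, 1) output.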
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4, 'stride': 1}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0(in_out_ptr0,
in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp31 = tmp1 + tmp0
tmp32 = tmp3 + tmp31
tmp33 = tmp5 + tmp32
tmp34 = tmp7 + tmp33
tmp35 = tmp9 + tmp34
tmp36 = tmp11 + tmp35
tmp37 = tmp13 + tmp36
tmp38 = tmp15 + tmp37
tmp39 = tmp17 + tmp38
tmp40 = tmp19 + tmp39
tmp41 = tmp21 + tmp40
tmp42 = tmp23 + tmp41
tmp43 = tmp25 + tmp42
tmp44 = tmp27 + tmp43
tmp45 = tmp29 + tmp44
tmp46 = 0.0625
tmp47 = tmp45 * tmp46
tmp48 = tmp30 + tmp47
tl.store(in_out_ptr0 + x0, tmp48, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_avg_pool2d_max_pool2d_with_indices_0[grid(16)](
buf2, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf2,
class _MixPool2dNew(torch.nn.Module):
def __init__(self, kernel_size, stride, padding=0, ceil_mode=False):
super(_MixPool2dNew, self).__init__()
self.max_pool = torch.nn.MaxPool2d(kernel_size, stride, padding,
ceil_mode=ceil_mode)
self.avg_pool = torch.nn.AvgPool2d(kernel_size, stride, padding,
ceil_mode=ceil_mode)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| x6rulin/TP-GAN | _MixPool2d | false | 4,577 | [
"MIT"
] | 0 | 1716cf06aaff8a6a2cee2548ec662dcdd68c0449 | https://github.com/x6rulin/TP-GAN/tree/1716cf06aaff8a6a2cee2548ec662dcdd68c0449 | import torch
class Model(torch.nn.Module):
def __init__(self, kernel_size, stride, padding=0, ceil_mode=False):
super().__init__()
self.max_pool = torch.nn.MaxPool2d(kernel_size, stride, padding,
ceil_mode=ceil_mode)
self.avg_pool = torch.nn.AvgPool2d(kernel_size, stride, padding,
ceil_mode=ceil_mode)
def forward(self, input):
return self.max_pool(input) + self.avg_pool(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 1]
|
SelfGated | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/j6/cj63ypsp5wd4xpbcgdrjj2sjbi74adsw4ajccnbd2ift6xmplwm2.py
# Topologically Sorted Source Nodes: [x_gt, x], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# x => mul
# x_gt => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
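# Fused gate epilogue: out = x * sigmoid(x_l) elementwise over all 256 activations,
# where in_ptr0 is the input x and in_ptr1 the linear_g output buffer.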
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_l], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_gt, x], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(primals_3, buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn.functional as F
class SelfGated(torch.nn.Module):
"""
Self-Gated layer. math: x * \\sigma(W x + b)
"""
def __init__(self, input_size):
super(SelfGated, self).__init__()
self.linear_g = torch.nn.Linear(input_size, input_size)
def forward(self, x):
x_l = self.linear_g(x)
x_gt = F.sigmoid(x_l)
x = x * x_gt
return x
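# Illustrative check (not part of the original source), assuming layer = SelfGated(4)
# and x = torch.rand(4, 4, 4, 4): the gate lies in (0, 1), so
#   torch.allclose(layer(x), x * torch.sigmoid(layer.linear_g(x)))  # True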
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](primals_3, buf0, buf1,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_3, buf0
class SelfGatedNew(torch.nn.Module):
"""
Self-Gated layer. math: x * \\sigma(W x + b)
"""
def __init__(self, input_size):
super(SelfGatedNew, self).__init__()
self.linear_g = torch.nn.Linear(input_size, input_size)
def forward(self, input_0):
primals_1 = self.linear_g.weight
primals_2 = self.linear_g.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| xdong73S/Match_LSTM_v2.0 | SelfGated | false | 4,578 | [
"MIT"
] | 0 | dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | https://github.com/xdong73S/Match_LSTM_v2.0/tree/dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | import torch
import torch.utils.data
import torch.nn.functional as F
class Model(torch.nn.Module):
"""
Self-Gated layer. math: x * \\sigma(W x + b)
"""
def __init__(self, input_size):
super().__init__()
self.linear_g = torch.nn.Linear(input_size, input_size)
def forward(self, x):
x_l = self.linear_g(x)
x_gt = F.sigmoid(x_l)
x = x * x_gt
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
RingLoss | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/vj/cvjonhukcg5istyp2vzj3xmwqctpymw4xnydshvqt5dmspsjso3r.py
# Topologically Sorted Source Nodes: [norm, sub, pow_1, loss], Original ATen: [aten.linalg_vector_norm, aten.sub, aten.pow, aten.mean, aten.mul]
# Source node to ATen node mapping:
# loss => mean
# norm => pow_1, pow_2, sum_1
# pow_1 => pow_3
# sub => sub
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 1.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_4, 2.0), kwargs = {})
triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
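# One persistent-reduction program: each of the 64 lanes computes an L2 norm over
# dim=1 (four loads strided by 16), subtracts the radius, and the squared deviations
# are summed and divided by 64.0 to give the mean loss (in_out_ptr0); out_ptr1 also
# stores 2 * (norm - radius), the per-element gradient factor kept for backward.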
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp12 = tl.load(in_ptr1 + (0))
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp14 = tmp11 - tmp13
tmp15 = 2.0
tmp16 = tmp14 * tmp15
tmp17 = tmp14 * tmp14
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.sum(tmp18, 1)[:, None]
tmp21 = 64.0
tmp22 = tmp20 / tmp21
tl.store(out_ptr1 + (tl.broadcast_to(r2, [XBLOCK, RBLOCK])), tmp16, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp22, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [norm, sub, pow_1, loss], Original ATen: [aten.linalg_vector_norm, aten.sub, aten.pow, aten.mean, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0.run(buf3, primals_1, primals_2, buf2, 1, 64, grid=grid(1), stream=stream0)
del primals_1
del primals_2
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import warnings
import torch.nn as nn
from torchvision.transforms import *
class RingLoss(nn.Module):
"""Ring loss.
Reference:
Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018.
"""
def __init__(self):
super(RingLoss, self).__init__()
warnings.warn('This method is deprecated')
self.radius = nn.Parameter(torch.ones(1, dtype=torch.float))
def forward(self, x):
loss = ((x.norm(p=2, dim=1) - self.radius) ** 2).mean()
return loss
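# Illustrative check (not part of the original source): for x of shape (4, 4, 4, 4),
# x.norm(p=2, dim=1) has shape (4, 4, 4); the loss is the mean squared deviation of
# those 64 norms from the single learnable scalar self.radius.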
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import warnings
import torch.nn as nn
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp12 = tl.load(in_ptr1 + 0)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp14 = tmp11 - tmp13
tmp15 = 2.0
tmp16 = tmp14 * tmp15
tmp17 = tmp14 * tmp14
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp20 = tl.sum(tmp18, 1)[:, None]
tmp21 = 64.0
tmp22 = tmp20 / tmp21
tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp16, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0[grid(1)](buf3,
primals_1, primals_2, buf2, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_1
del primals_2
return buf3, buf2
class RingLossNew(nn.Module):
"""Ring loss.
Reference:
Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018.
"""
def __init__(self):
super(RingLossNew, self).__init__()
warnings.warn('This method is deprecated')
self.radius = nn.Parameter(torch.ones(1, dtype=torch.float))
def forward(self, input_0):
primals_2 = self.radius
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| xijiali/ABD_Net | RingLoss | false | 4,579 | [
"MIT"
] | 0 | 8d2d9b316b7c181ce441ceb4b1c62fb9a6d53153 | https://github.com/xijiali/ABD_Net/tree/8d2d9b316b7c181ce441ceb4b1c62fb9a6d53153 | import torch
import warnings
import torch.nn as nn
from torchvision.transforms import *
class Model(nn.Module):
"""Ring loss.
Reference:
Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018.
"""
def __init__(self):
super().__init__()
warnings.warn('This method is deprecated')
self.radius = nn.Parameter(torch.ones(1, dtype=torch.float))
def forward(self, x):
loss = ((x.norm(p=2, dim=1) - self.radius) ** 2).mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
AxialEncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cf/ccfb6ec7gjxmag3ujecjz3vcmyoemkahdlpm7h6hhek4dpm3jhpe.py
# Topologically Sorted Source Nodes: [mean, var, add, std, sub, x, x_1, x_2], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# std => sqrt
# sub => sub
# var => var
# x => mul
# x_1 => div
# x_2 => add_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %sqrt), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tmp23 / tmp9
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp12 / tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
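# Eager-mode reference for the fused kernel above (a sketch matching the graph
# fragment, not the compiled path); primals_2/primals_3 are the LayerNorm
# affine parameters a_2/b_2 from the eager module:
#   mean = x.mean(-1, keepdim=True)
#   var  = x.var(-1, correction=0, keepdim=True)
#   out  = a_2 * (x - mean) / torch.sqrt(var + 1e-05) + b_2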
# kernel path: runs/run_shard_7/inductor_cache/yg/cygetp252fgyjtko7svpkgbjp7wo427qeflipv4ooag7thzxfplv.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default_12, 1.0), kwargs = {})
# %clone_default_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_4,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
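# What triton_poi_fused_1 fuses (hedged reading of the graph fragment): add the
# linear projection bias, then multiply by the 1.0 scale; presumably the
# attention factor 1/sqrt(d_k), which is 1 for the d_k = 1 head size implied by
# the (16, 4, 4, 1) output layout.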
# kernel path: runs/run_shard_7/inductor_cache/id/cidtmanoekdt6t4hzyhgsdim7m6nulkmrcvmjxjsbu6w7ij3gguj.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_default_14, [-1], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_default_14, %amax_default_1), kwargs = {})
# %exp_default_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor_1,), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
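# Numerically stable softmax, step 1 (per the fragment): subtract the row max
# before exponentiating, i.e.
#   exp_scores = torch.exp(scores - scores.amax(-1, keepdim=True))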
# kernel path: runs/run_shard_7/inductor_cache/k7/ck7mt47gcsvjuojmxi43bbyt6bs42tim2wg4xlxeg6puq33qdj6g.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %sum_dim_int_list_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default_1, [-1], True), kwargs = {})
# %div_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default_1, %sum_dim_int_list_2), kwargs = {})
# %eq_scalar_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%view_default_14, -inf), kwargs = {})
# %logical_not_default_2 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar_1,), kwargs = {})
# %any_dim_1 : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default_2, -1, True), kwargs = {})
# %logical_not_default_3 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim_1,), kwargs = {})
# %full_default_2 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([16, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_3, %full_default_2, %div_tensor_1), kwargs = {})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x2), xmask)
tmp26 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = float("-inf")
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = (tmp4 != 0)
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = (tmp9 != 0)
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = (tmp15 != 0)
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = (tmp21 != 0)
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
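# Softmax, step 2, with all-masked-row handling (eager sketch of the fragment):
#   attn = exp_scores / exp_scores.sum(-1, keepdim=True)
#   fully_masked = (scores == float('-inf')).all(-1, keepdim=True)
#   attn = torch.where(fully_masked, torch.zeros_like(attn), attn)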
# kernel path: runs/run_shard_7/inductor_cache/t4/ct46o5ivyd6ymdx3ffopahljt5kagycaak73cydyio4wh2zrdkkh.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_5 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/if/cif7yykj5nniiycohjohrh2ilehi72gkyw6tc4denoyhtbnfxy2c.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/s7/cs74uvx7kqz5txtfl237kdsbcgl56s3qansoqmj4gkklbmmmmg2f.py
# Topologically Sorted Source Nodes: [src, mean_2, var_1], Original ATen: [aten.add, aten.mean, aten.var]
# Source node to ATen node mapping:
# mean_2 => mean_1
# src => add_2
# var_1 => var_1
# Graph fragment:
# %add_2 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_25), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_2, [-1], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add_2, [-1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_mean_var_6 = async_compile.triton('triton_poi_fused_add_mean_var_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_var_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_var_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(in_out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
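# Fused residual + LayerNorm statistics (per the fragment): src = x + attn_out,
# then the mean and biased variance of src over the last dim are computed here
# and consumed by the affine normalization in the next kernel.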
# kernel path: runs/run_shard_7/inductor_cache/wx/cwxzak5aidpmtsbnfuzdxn2rq4js6sc5wm4ul7ocmw3x4ibuqjhg.py
# Topologically Sorted Source Nodes: [src2_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# src2_2 => clone_6
# Graph fragment:
# %clone_6 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_10,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex
x5 = (xindex // 4)
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask)
tmp2 = tl.load(in_ptr2 + (x4), xmask)
tmp4 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/n5/cn5yemnqw5hucudvzc4jwkhfq7hvnrykttrj2f2bu6qcn4ket663.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# src => add_2
# src_1 => add_5
# Graph fragment:
# %add_2 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_25), kwargs = {})
# %add_5 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %permute_20), kwargs = {})
triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask)
tmp3 = tl.load(in_ptr2 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5s/c5stk5glscu2vhkyygzl37kwox56bq47ibhaq46siyunrac2divm.py
# Topologically Sorted Source Nodes: [relu_], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu_ => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_48,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_53, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
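# ReLU with saved backward mask (per the fragment): out = max(0, x + bias) is
# written in place, and the boolean mask (out <= 0) is stored for the
# threshold_backward pass.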
# kernel path: runs/run_shard_7/inductor_cache/cs/ccsko6axmghf5opun7oldfuzwck2miyrvybdwzv5jxatbhz5oca4.py
# Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.view]
# Source node to ATen node mapping:
# src_2 => view_54
# Graph fragment:
# %view_54 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_53, [64, 4]), kwargs = {})
triton_poi_fused_view_10 = async_compile.triton('triton_poi_fused_view_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/je/cjeto5udyicynsheseaalnjgngxbugumtczwhn3wzbze5qqm7eyu.py
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# src_3 => add_8
# Graph fragment:
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %view_55), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
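# Final residual connection of the feed-forward block (per the fragment):
# src_3 = src_1 + (linear2_out + bias), accumulated in place into the buffer.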
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4, ), (1, ))
assert_size_stride(primals_20, (4, 4), (4, 1))
assert_size_stride(primals_21, (4, ), (1, ))
assert_size_stride(primals_22, (4, ), (1, ))
assert_size_stride(primals_23, (4, ), (1, ))
assert_size_stride(primals_24, (4, 4), (4, 1))
assert_size_stride(primals_25, (4, ), (1, ))
assert_size_stride(primals_26, (4, 4), (4, 1))
assert_size_stride(primals_27, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, var, add, std, sub, x, x_1, x_2], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf1, primals_5, buf4, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_5
buf5 = reinterpret_tensor(buf1, (16, 4, 1, 4), (16, 4, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf2, primals_7, buf5, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (64, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf6, buf7, 1024, grid=grid(1024), stream=stream0)
buf8 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf6, buf7, buf8, 1024, grid=grid(1024), stream=stream0)
buf9 = reinterpret_tensor(buf2, (16, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(buf3, primals_9, buf9, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_9
buf10 = reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf8, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (64, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf10, buf11, 64, 4, grid=grid(64, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_11
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf14 = buf13; del buf13 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [src, mean_2, var_1], Original ATen: [aten.add, aten.mean, aten.var]
triton_poi_fused_add_mean_var_6.run(buf14, primals_1, buf12, buf15, 64, grid=grid(64), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src2_2], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(primals_12, primals_1, buf12, buf15, buf14, primals_13, buf16, 256, grid=grid(256), stream=stream0)
del buf14
del buf15
del primals_13
buf17 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf17)
buf18 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf18)
buf19 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf19)
buf20 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf17, primals_15, buf20, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_15
buf21 = reinterpret_tensor(buf17, (16, 4, 1, 4), (16, 4, 4, 1), 0); del buf17 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf18, primals_17, buf21, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_17
buf22 = reinterpret_tensor(buf7, (64, 4, 4), (16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf20, (64, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf21, (64, 1, 4), (4, 0, 1), 0), out=buf22)
buf23 = reinterpret_tensor(buf6, (16, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf22, buf23, 1024, grid=grid(1024), stream=stream0)
buf24 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf22, buf23, buf24, 1024, grid=grid(1024), stream=stream0)
del buf22
del buf23
buf25 = reinterpret_tensor(buf18, (16, 4, 4, 1), (16, 4, 1, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(buf19, primals_19, buf25, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_19
buf26 = reinterpret_tensor(buf19, (64, 4, 1), (4, 1, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf24, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf25, (64, 4, 1), (4, 1, 0), 0), out=buf26)
buf27 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf26, buf27, 64, 4, grid=grid(64, 4), stream=stream0)
buf28 = reinterpret_tensor(buf26, (64, 4), (4, 1), 0); del buf26 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf27, (64, 4), (4, 1), 0), reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf28)
buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add]
triton_poi_fused_add_8.run(primals_1, buf12, buf28, primals_21, buf29, 256, grid=grid(256), stream=stream0)
del primals_21
buf30 = reinterpret_tensor(buf28, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf28 # reuse
# Topologically Sorted Source Nodes: [mean_4, var_2, add_4, std_2, sub_2, x_6, x_7, x_8], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0.run(primals_22, buf29, primals_23, buf30, 256, grid=grid(256), stream=stream0)
del primals_23
buf31 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf30, (64, 4), (4, 1), 0), reinterpret_tensor(primals_24, (4, 4), (1, 4), 0), out=buf31)
buf32 = reinterpret_tensor(buf31, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf31 # reuse
buf36 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu_], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_9.run(buf32, primals_25, buf36, 256, grid=grid(256), stream=stream0)
del primals_25
buf33 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.view]
triton_poi_fused_view_10.run(buf32, buf33, 256, grid=grid(256), stream=stream0)
buf34 = reinterpret_tensor(buf32, (64, 4), (4, 1), 0); del buf32 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf33, reinterpret_tensor(primals_26, (4, 4), (1, 4), 0), out=buf34)
buf35 = reinterpret_tensor(buf34, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf34 # reuse
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf35, buf29, primals_27, 256, grid=grid(256), stream=stream0)
del primals_27
return (buf35, primals_1, primals_12, primals_22, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf9, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (64, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf11, (64, 4), (4, 1), 0), buf12, reinterpret_tensor(buf16, (64, 4), (4, 1), 0), buf24, reinterpret_tensor(buf25, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf20, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf21, (64, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf27, (64, 4), (4, 1), 0), buf29, reinterpret_tensor(buf30, (64, 4), (4, 1), 0), buf33, primals_26, buf36, primals_24, primals_20, primals_18, primals_16, primals_14, primals_10, primals_8, primals_6, primals_4, )
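    # buf35 is the layer output; the remaining entries are inputs, weights and
    # intermediates kept for autograd (the typical inductor forward-only
    # output convention; a reading of the code, not generated annotation).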
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def orthogonal_matrix_chunk(cols, qr_uniform_q=False, device=None):
unstructured_block = torch.randn((cols, cols), device=device)
q, r = torch.linalg.qr(unstructured_block.cpu(), 'reduced')
    q, r = map(lambda t: t.to(device), (q, r))  # move QR factors back to the requested device
if qr_uniform_q:
d = torch.diag(r, 0)
q *= d.sign()
return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0,
qr_uniform_q=False, device=None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
        multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim=1)
elif scaling == 1:
multiplier = math.sqrt(float(nb_columns)) * torch.ones((nb_rows,),
device=device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
def generalized_kernel(data, *, projection_matrix, kernel_fn=nn.ReLU(
inplace=True), kernel_epsilon=0.001, normalize_data=True, device=None):
_b, _h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
data = data_normalizer * data
data = torch.matmul(data, projection_matrix.T)
data = kernel_fn(data) + kernel_epsilon
return data.type_as(data)
def linear_attention(q, k, v):
L = k.shape[-2]
D_inv = 1.0 / torch.einsum('...nd,...d->...n', q, k.mean(dim=-2))
context = torch.einsum('...nd,...ne->...de', k / float(L), v)
del k, v
out = torch.einsum('...n,...nd->...nd', D_inv, q)
del D_inv, q
out = torch.einsum('...nd,...de->...ne', out, context)
return out
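# Hedged note on the normalization above: the 1/L factors cancel (D_inv is
# built from k.mean(dim=-2), i.e. k.sum(-2) / L, while `context` divides k by
# L), so the result equals standard linear attention:
#   out = q @ (k^T v) / (q . k.sum(-2))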
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=
True, eps=0.0001, device=None):
b, h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
ratio = projection_matrix.shape[0] ** -0.5
projection = projection_matrix.unsqueeze(0).repeat(h, 1, 1)
projection = projection.unsqueeze(0).repeat(b, 1, 1, 1)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', data_normalizer * data,
projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = diag_data / 2.0 * data_normalizer ** 2
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash, dim=-1, keepdim=True).values) + eps)
else:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash)) + eps)
return data_dash.type_as(data)
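# Performer-style positive random features (hedged reading): queries are
# stabilized with a per-row max, keys with a single global max, since keys are
# later summed across all positions inside linear_attention.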
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module,
type)]
def get_module_device(module):
return next(module.parameters()).device
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
        std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
class MultiheadAttention(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super(MultiheadAttention, self).__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim, d_model)
self.to_value = nn.Linear(v_dim, d_model)
self.to_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value):
batch, L = query.shape[:2]
q = self.to_query(query).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
k = self.to_key(key).view(batch, L, self.heads, self.d_k).permute(0,
2, 1, 3)
v = self.to_value(value).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
attention = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
out = torch.matmul(attention, v)
out = out.permute(0, 2, 1, 3).contiguous().view(batch, L, -1)
out = self.to_out(out)
return out
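# Minimal usage sketch (shapes are illustrative assumptions, not from this row):
#   mha = MultiheadAttention(d_model=64, heads=8)
#   x = torch.randn(2, 10, 64)    # (batch, length, d_model)
#   y = mha(x, x, x)              # -> (2, 10, 64)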
class FastAttention(nn.Module):
def __init__(self, dim_heads, nb_features=None, ortho_scaling=0,
generalized_attention=False, kernel_fn=nn.ReLU(inplace=True),
qr_uniform_q=False, no_projection=False):
super().__init__()
        nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
self.dim_heads = dim_heads
self.nb_features = nb_features
self.ortho_scaling = ortho_scaling
if not no_projection:
self.create_projection = partial(gaussian_orthogonal_random_matrix,
nb_rows=self.nb_features, nb_columns=dim_heads, scaling=
ortho_scaling, qr_uniform_q=qr_uniform_q)
projection_matrix = self.create_projection()
self.register_buffer('projection_matrix', projection_matrix)
self.generalized_attention = generalized_attention
self.kernel_fn = kernel_fn
self.no_projection = no_projection
@torch.no_grad()
def redraw_projection_matrix(self, device):
projections = self.create_projection(device=device)
self.projection_matrix.copy_(projections)
del projections
def forward(self, q, k, v):
device = q.device
if self.no_projection:
q = q.softmax(dim=-1)
            k = k.softmax(dim=-2)
elif self.generalized_attention:
create_kernel = partial(generalized_kernel, kernel_fn=self.
kernel_fn, projection_matrix=self.projection_matrix, device
=device)
q, k = map(create_kernel, (q, k))
else:
create_kernel = partial(softmax_kernel, projection_matrix=self.
projection_matrix, device=device)
q = create_kernel(q, is_query=True)
k = create_kernel(k, is_query=False)
attn_fn = linear_attention
out = attn_fn(q, k, v)
return out
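# Dispatch summary (restating the branches above): no_projection uses plain
# softmax features, generalized_attention pushes kernel_fn features through the
# random projection, and the default path uses FAVOR+ softmax_kernel features;
# all three feed the same linear_attention contraction.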
class SelfAttention(nn.Module):
def __init__(self, dim, k_dim=None, heads=8, local_heads=0,
local_window_size=256, nb_features=None, feature_redraw_interval=
1000, generalized_attention=False, kernel_fn=nn.ReLU(inplace=True),
qr_uniform_q=False, dropout=0.0, no_projection=False):
super().__init__()
assert dim % heads == 0, 'dimension must be divisible by number of heads'
dim_head = dim // heads
inner_dim = dim_head * heads
if k_dim is None:
k_dim = dim
self.fast_attention = FastAttention(dim_head, nb_features,
generalized_attention=generalized_attention, kernel_fn=
kernel_fn, qr_uniform_q=qr_uniform_q, no_projection=no_projection)
self.heads = heads
self.dim = dim
self.to_query = nn.Linear(dim, inner_dim)
self.to_key = nn.Linear(k_dim, inner_dim)
self.to_value = nn.Linear(k_dim, inner_dim)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout, inplace=True)
self.feature_redraw_interval = feature_redraw_interval
self.register_buffer('calls_since_last_redraw', torch.tensor(0))
self.max_tokens = 2 ** 16
def check_redraw_projections(self):
if not self.training:
return
        if (exists(self.feature_redraw_interval) and
                self.calls_since_last_redraw >= self.feature_redraw_interval):
device = get_module_device(self)
fast_attentions = find_modules(self, FastAttention)
for fast_attention in fast_attentions:
fast_attention.redraw_projection_matrix(device)
self.calls_since_last_redraw.zero_()
return
self.calls_since_last_redraw += 1
def _batched_forward(self, q, k, v):
b1, h, n1 = q.shape[:3]
        out = torch.empty((b1, h, n1, self.dim // h), dtype=q.dtype,
            device=q.device)
shift = self.max_tokens // n1
for i_b in range(0, b1, shift):
start = i_b
end = min(i_b + shift, b1)
out[start:end] = self.fast_attention(q[start:end], k[start:end],
v[start:end])
return out
def forward(self, query, key, value, **kwargs):
self.check_redraw_projections()
b1, n1, _, h = *query.shape, self.heads
b2, n2, _, h = *key.shape, self.heads
q = self.to_query(query)
k = self.to_key(key)
v = self.to_value(value)
q = q.reshape(b1, n1, h, -1).permute(0, 2, 1, 3)
k = k.reshape(b2, n2, h, -1).permute(0, 2, 1, 3)
v = v.reshape(b2, n2, h, -1).permute(0, 2, 1, 3)
if b1 * n1 > self.max_tokens or b2 * n2 > self.max_tokens:
out = self._batched_forward(q, k, v)
else:
out = self.fast_attention(q, k, v)
out = out.permute(0, 2, 1, 3).reshape(b1, n1, -1)
out = self.to_out(out)
return self.dropout(out)
class SequenceWeight(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super(SequenceWeight, self).__init__()
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.scale = 1.0 / math.sqrt(self.d_k)
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, msa):
B, N, L = msa.shape[:3]
msa = msa.permute(0, 2, 1, 3)
tar_seq = msa[:, :, 0].unsqueeze(2)
q = self.to_query(tar_seq).view(B, L, 1, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
k = self.to_key(msa).view(B, L, N, self.heads, self.d_k).permute(0,
1, 3, 4, 2).contiguous()
q = q * self.scale
attn = torch.matmul(q, k)
attn = F.softmax(attn, dim=-1)
return self.dropout(attn)
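# Hedged shape sketch (the helper below is illustrative, not part of the
# original module): SequenceWeight scores every sequence in the MSA against
# the first (target) sequence and returns one softmax distribution over the
# N sequences per position and head.
def _sequence_weight_shape_demo():
    sw = SequenceWeight(d_model=8, heads=2)
    sw.eval()                              # disable dropout for the check
    w = sw(torch.rand(1, 5, 7, 8))         # msa: (B=1, N=5, L=7, d_model=8)
    assert w.shape == (1, 7, 2, 1, 5)      # (B, L, heads, 1, N)
    assert torch.allclose(w.sum(-1), torch.ones(1, 7, 2, 1))
    return w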
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, d_ff, p_drop=0.1):
super(FeedForwardLayer, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(p_drop, inplace=True)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, src):
src = self.linear2(self.dropout(F.relu_(self.linear1(src))))
return src
class TiedMultiheadAttention(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super(TiedMultiheadAttention, self).__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.scaling = 1 / math.sqrt(self.d_k)
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim, d_model)
self.to_value = nn.Linear(v_dim, d_model)
self.to_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, query, key, value, return_att=False):
B, N, L = query.shape[:3]
q = self.to_query(query).view(B, N, L, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
k = self.to_key(key).view(B, N, L, self.heads, self.d_k).permute(0,
1, 3, 4, 2).contiguous()
v = self.to_value(value).view(B, N, L, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
scale = self.scaling / math.sqrt(N)
q = q * scale
attention = torch.einsum('bnhik,bnhkj->bhij', q, k)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
attention = attention.unsqueeze(1)
out = torch.matmul(attention, v)
out = out.permute(0, 1, 3, 2, 4).contiguous().view(B, N, L, -1)
out = self.to_out(out)
if return_att:
attention = attention.squeeze(1)
attention = 0.5 * (attention + attention.permute(0, 1, 3, 2))
attention = attention.permute(0, 3, 1, 2)
return out, attention
return out
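# Illustrative sketch (hypothetical helper, not in the original source): tied
# row attention sums the per-sequence logits over n in the einsum above, so a
# single LxL map is shared by all N sequences; the extra 1/sqrt(N) keeps the
# summed logits on the same scale.
def _tied_attention_demo():
    tma = TiedMultiheadAttention(d_model=8, heads=2)
    tma.eval()
    x = torch.rand(1, 5, 7, 8)             # (B, N, L, d_model)
    out, att = tma(x, x, x, return_att=True)
    assert out.shape == (1, 5, 7, 8)       # same layout as the input MSA
    assert att.shape == (1, 7, 2, 7)       # symmetrized (B, L, heads, L) map
    return out, att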
class SoftTiedMultiheadAttention(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super(SoftTiedMultiheadAttention, self).__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.scale = 1.0 / math.sqrt(self.d_k)
self.seq_weight = SequenceWeight(d_model, heads, dropout=dropout)
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim, d_model)
self.to_value = nn.Linear(v_dim, d_model)
self.to_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, query, key, value, return_att=False):
B, N, L = query.shape[:3]
seq_weight = self.seq_weight(query)
seq_weight = seq_weight.permute(0, 4, 2, 1, 3)
q = self.to_query(query).view(B, N, L, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
k = self.to_key(key).view(B, N, L, self.heads, self.d_k).permute(0,
1, 3, 4, 2).contiguous()
v = self.to_value(value).view(B, N, L, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
q = q * seq_weight
k = k * self.scale
attention = torch.einsum('bnhik,bnhkj->bhij', q, k)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
del q, k, seq_weight
out = torch.einsum('bhij,bnhjk->bnhik', attention, v)
out = out.permute(0, 1, 3, 2, 4).contiguous().view(B, N, L, -1)
out = self.to_out(out)
if return_att:
attention = attention.squeeze(1)
attention = 0.5 * (attention + attention.permute(0, 1, 3, 2))
attention = attention.permute(0, 2, 3, 1)
return out, attention
return out
class AxialEncoderLayer(nn.Module):
def __init__(self, d_model, d_ff, heads, p_drop=0.1, performer_opts=
None, use_tied_row=False, use_tied_col=False, use_soft_row=False):
super(AxialEncoderLayer, self).__init__()
self.use_performer = performer_opts is not None
self.use_tied_row = use_tied_row
self.use_tied_col = use_tied_col
self.use_soft_row = use_soft_row
if use_tied_row:
self.attn_L = TiedMultiheadAttention(d_model, heads, dropout=p_drop
)
elif use_soft_row:
self.attn_L = SoftTiedMultiheadAttention(d_model, heads,
dropout=p_drop)
elif self.use_performer:
self.attn_L = SelfAttention(dim=d_model, heads=heads, dropout=
p_drop, generalized_attention=True, **performer_opts)
else:
self.attn_L = MultiheadAttention(d_model, heads, dropout=p_drop)
if use_tied_col:
self.attn_N = TiedMultiheadAttention(d_model, heads, dropout=p_drop
)
elif self.use_performer:
self.attn_N = SelfAttention(dim=d_model, heads=heads, dropout=
p_drop, generalized_attention=True, **performer_opts)
else:
self.attn_N = MultiheadAttention(d_model, heads, dropout=p_drop)
self.ff = FeedForwardLayer(d_model, d_ff, p_drop=p_drop)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.norm3 = LayerNorm(d_model)
self.dropout1 = nn.Dropout(p_drop, inplace=True)
self.dropout2 = nn.Dropout(p_drop, inplace=True)
self.dropout3 = nn.Dropout(p_drop, inplace=True)
def forward(self, src, return_att=False):
B, N, L = src.shape[:3]
src2 = self.norm1(src)
if self.use_tied_row or self.use_soft_row:
src2 = self.attn_L(src2, src2, src2)
else:
src2 = src2.reshape(B * N, L, -1)
src2 = self.attn_L(src2, src2, src2)
src2 = src2.reshape(B, N, L, -1)
src = src + self.dropout1(src2)
src2 = self.norm2(src)
if self.use_tied_col:
src2 = src2.permute(0, 2, 1, 3)
src2 = self.attn_N(src2, src2, src2)
src2 = src2.permute(0, 2, 1, 3)
else:
src2 = src2.permute(0, 2, 1, 3).reshape(B * L, N, -1)
src2 = self.attn_N(src2, src2, src2)
src2 = src2.reshape(B, L, N, -1).permute(0, 2, 1, 3)
src = src + self.dropout2(src2)
src2 = self.norm3(src)
src2 = self.ff(src2)
src = src + self.dropout3(src2)
return src
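# Minimal usage sketch matching get_inputs()/get_init_inputs() below: one
# axial block attends along the row (L) axis, then the column (N) axis, then
# applies the feed-forward, each behind a pre-norm residual connection.
def _axial_encoder_layer_demo():
    layer = AxialEncoderLayer(d_model=4, d_ff=4, heads=4)
    layer.eval()
    src = torch.rand(4, 4, 4, 4)           # (B, N, L, d_model)
    out = layer(src)
    assert out.shape == src.shape
    return out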
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4, 'heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tmp23 / tmp9
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp12 / tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
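# Reference sketch (an assumption inferred from the loads above, not compiler
# output): this fused kernel applies the LayerNorm defined later in this file
# in a single pass over the last dimension of size 4.
def _fused_layernorm_reference(x, a_2, b_2, eps=1e-05):
    mean = x.mean(-1, keepdim=True)
    var = x.var(-1, keepdim=True, unbiased=False)
    return a_2 * (x - mean) / torch.sqrt(var + eps) + b_2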
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp25 = tl.load(in_ptr1 + x2, xmask)
tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = float('-inf')
tmp2 = tmp0 == tmp1
tmp3 = tmp2 == 0
tmp4 = tmp3.to(tl.int64)
tmp5 = tmp4 != 0
tmp7 = tmp6 == tmp1
tmp8 = tmp7 == 0
tmp9 = tmp8.to(tl.int64)
tmp10 = tmp9 != 0
tmp11 = tmp5 | tmp10
tmp13 = tmp12 == tmp1
tmp14 = tmp13 == 0
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 != 0
tmp17 = tmp11 | tmp16
tmp19 = tmp18 == tmp1
tmp20 = tmp19 == 0
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp21 != 0
tmp23 = tmp17 | tmp22
tmp24 = tmp23 == 0
tmp28 = tmp26 + tmp27
tmp30 = tmp28 + tmp29
tmp32 = tmp30 + tmp31
tmp33 = tmp25 / tmp32
tmp34 = 0.0
tmp35 = tl.where(tmp24, tmp34, tmp33)
tl.store(out_ptr0 + x2, tmp35, xmask)
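# Reference sketch (hedged reconstruction): triton_poi_fused_2 and
# triton_poi_fused_3 together form a numerically stable softmax over the last
# dimension, mapping rows whose logits are all -inf to zeros rather than NaNs.
def _stable_softmax_reference(scores):
    m = scores.max(dim=-1, keepdim=True).values
    e = torch.exp(scores - m)                       # triton_poi_fused_2
    p = e / e.sum(dim=-1, keepdim=True)             # triton_poi_fused_3
    all_neg_inf = (scores == float('-inf')).all(dim=-1, keepdim=True)
    return torch.where(all_neg_inf, torch.zeros_like(p), p)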
@triton.jit
def triton_poi_fused_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mean_var_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(in_out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex
x5 = xindex // 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp2 = tl.load(in_ptr2 + x4, xmask)
tmp4 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp13, xmask)
@triton.jit
def triton_poi_fused_add_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp3 = tl.load(in_ptr2 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4), (4, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4, 4), (4, 1))
assert_size_stride(primals_21, (4,), (1,))
assert_size_stride(primals_22, (4,), (1,))
assert_size_stride(primals_23, (4,), (1,))
assert_size_stride(primals_24, (4, 4), (4, 1))
assert_size_stride(primals_25, (4,), (1,))
assert_size_stride(primals_26, (4, 4), (4, 1))
assert_size_stride(primals_27, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_1[grid(64, 4)](buf1, primals_5, buf4, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_5
buf5 = reinterpret_tensor(buf1, (16, 4, 1, 4), (16, 4, 4, 1), 0)
del buf1
triton_poi_fused_1[grid(64, 4)](buf2, primals_7, buf5, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (64, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_2[grid(1024)](buf6, buf7, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_3[grid(1024)](buf6, buf7, buf8, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 4, 1), (16, 4, 1, 1), 0)
del buf2
triton_poi_fused_4[grid(64, 4)](buf3, primals_9, buf9, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_9
buf10 = reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf8, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (64, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_11
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_mean_var_6[grid(64)](buf14, primals_1, buf12,
buf15, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_7[grid(256)](primals_12, primals_1, buf12,
buf15, buf14, primals_13, buf16, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf14
del buf15
del primals_13
buf17 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf17)
buf18 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf18)
buf19 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf19)
buf20 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_1[grid(64, 4)](buf17, primals_15, buf20, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_15
buf21 = reinterpret_tensor(buf17, (16, 4, 1, 4), (16, 4, 4, 1), 0)
del buf17
triton_poi_fused_1[grid(64, 4)](buf18, primals_17, buf21, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_17
buf22 = reinterpret_tensor(buf7, (64, 4, 4), (16, 4, 1), 0)
del buf7
extern_kernels.bmm(reinterpret_tensor(buf20, (64, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf21, (64, 1, 4), (4, 0, 1), 0), out=buf22)
buf23 = reinterpret_tensor(buf6, (16, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_2[grid(1024)](buf22, buf23, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf24 = empty_strided_cuda((16, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
triton_poi_fused_3[grid(1024)](buf22, buf23, buf24, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del buf22
del buf23
buf25 = reinterpret_tensor(buf18, (16, 4, 4, 1), (16, 4, 1, 1), 0)
del buf18
triton_poi_fused_4[grid(64, 4)](buf19, primals_19, buf25, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_19
buf26 = reinterpret_tensor(buf19, (64, 4, 1), (4, 1, 1), 0)
del buf19
extern_kernels.bmm(reinterpret_tensor(buf24, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf25, (64, 4, 1), (4, 1, 0), 0), out=buf26)
buf27 = empty_strided_cuda((16, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(64, 4)](buf26, buf27, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf28 = reinterpret_tensor(buf26, (64, 4), (4, 1), 0)
del buf26
extern_kernels.mm(reinterpret_tensor(buf27, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf28)
buf29 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_8[grid(256)](primals_1, buf12, buf28,
primals_21, buf29, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_21
buf30 = reinterpret_tensor(buf28, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf28
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](primals_22,
buf29, primals_23, buf30, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_23
buf31 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf30, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_24, (4, 4), (1, 4), 0), out=buf31)
buf32 = reinterpret_tensor(buf31, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf31
buf36 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_9[grid(256)](buf32,
primals_25, buf36, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_25
buf33 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_10[grid(256)](buf32, buf33, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf34 = reinterpret_tensor(buf32, (64, 4), (4, 1), 0)
del buf32
extern_kernels.mm(buf33, reinterpret_tensor(primals_26, (4, 4), (1,
4), 0), out=buf34)
buf35 = reinterpret_tensor(buf34, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf34
triton_poi_fused_add_11[grid(256)](buf35, buf29, primals_27, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_27
return (buf35, primals_1, primals_12, primals_22, reinterpret_tensor(
buf0, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf9, (64, 1, 4
), (4, 1, 1), 0), reinterpret_tensor(buf4, (64, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (64, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(buf11, (64, 4), (4, 1), 0), buf12,
reinterpret_tensor(buf16, (64, 4), (4, 1), 0), buf24,
reinterpret_tensor(buf25, (64, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf20, (64, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf21, (64, 4, 1), (4, 1, 4), 0),
reinterpret_tensor(buf27, (64, 4), (4, 1), 0), buf29,
reinterpret_tensor(buf30, (64, 4), (4, 1), 0), buf33, primals_26,
buf36, primals_24, primals_20, primals_18, primals_16, primals_14,
primals_10, primals_8, primals_6, primals_4)
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def orthogonal_matrix_chunk(cols, qr_uniform_q=False, device=None):
unstructured_block = torch.randn((cols, cols), device=device)
q, r = torch.linalg.qr(unstructured_block.cpu(), 'reduced')
    q, r = map(lambda t: t.to(device), (q, r))
if qr_uniform_q:
d = torch.diag(r, 0)
q *= d.sign()
return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0,
qr_uniform_q=False, device=None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim
=1)
elif scaling == 1:
multiplier = math.sqrt(float(nb_columns)) * torch.ones((nb_rows,),
device=device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
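# Hedged sanity sketch (illustrative helper, not part of the original file):
# each chunk returned by orthogonal_matrix_chunk has orthonormal rows, so
# Q @ Q.T should be close to the identity before the per-row multiplier is
# applied.
def _orthogonality_check(dim=16):
    q = orthogonal_matrix_chunk(dim)
    return torch.allclose(q @ q.t(), torch.eye(dim), atol=0.0001)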
def generalized_kernel(data, *, projection_matrix, kernel_fn=nn.ReLU(
inplace=True), kernel_epsilon=0.001, normalize_data=True, device=None):
_b, _h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
data = data_normalizer * data
data = torch.matmul(data, projection_matrix.T)
data = kernel_fn(data) + kernel_epsilon
    return data
def linear_attention(q, k, v):
L = k.shape[-2]
D_inv = 1.0 / torch.einsum('...nd,...d->...n', q, k.mean(dim=-2))
context = torch.einsum('...nd,...ne->...de', k / float(L), v)
del k, v
out = torch.einsum('...n,...nd->...nd', D_inv, q)
del D_inv, q
out = torch.einsum('...nd,...de->...ne', out, context)
return out
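# Equivalence sketch (an illustration, assuming q and k are nonnegative
# feature maps): linear_attention reorders (q k^T) v into q (k^T v); dividing
# both k.mean and context by L cancels, so the result matches the naive
# row-normalized form below up to floating point.
def _linear_attention_reference(q, k, v):
    attn = torch.einsum('...nd,...md->...nm', q, k)
    attn = attn / attn.sum(dim=-1, keepdim=True)
    return torch.einsum('...nm,...me->...ne', attn, v)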
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=
True, eps=0.0001, device=None):
b, h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
ratio = projection_matrix.shape[0] ** -0.5
projection = projection_matrix.unsqueeze(0).repeat(h, 1, 1)
projection = projection.unsqueeze(0).repeat(b, 1, 1, 1)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', data_normalizer * data,
projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = diag_data / 2.0 * data_normalizer ** 2
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash, dim=-1, keepdim=True).values) + eps)
else:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash)) + eps)
return data_dash.type_as(data)
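# Hypothetical end-to-end sketch: the FAVOR+ pipeline maps q and k through
# softmax_kernel and hands the resulting feature maps to linear_attention,
# approximating softmax attention without forming the n x n matrix.
def _favor_attention_demo(q, k, v, projection_matrix):
    q_prime = softmax_kernel(q, projection_matrix=projection_matrix, is_query=True)
    k_prime = softmax_kernel(k, projection_matrix=projection_matrix, is_query=False)
    return linear_attention(q_prime, k_prime, v)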
def find_modules(nn_module, module_type):
    return [module for module in nn_module.modules() if isinstance(module,
        module_type)]
def get_module_device(module):
return next(module.parameters()).device
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps
)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
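# Hedged equivalence sketch: with unbiased=False variance this LayerNorm
# should agree with torch.nn.functional.layer_norm over the last dimension.
def _layernorm_equivalence_check(d_model=8):
    ln = LayerNorm(d_model)
    x = torch.rand(2, 3, d_model)
    ref = F.layer_norm(x, (d_model,), ln.a_2, ln.b_2, ln.eps)
    return torch.allclose(ln(x), ref, atol=1e-05)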
class MultiheadAttention(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super(MultiheadAttention, self).__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim, d_model)
self.to_value = nn.Linear(v_dim, d_model)
self.to_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value):
batch, L = query.shape[:2]
q = self.to_query(query).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
k = self.to_key(key).view(batch, L, self.heads, self.d_k).permute(0,
2, 1, 3)
v = self.to_value(value).view(batch, L, self.heads, self.d_k).permute(
0, 2, 1, 3)
attention = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
out = torch.matmul(attention, v)
out = out.permute(0, 2, 1, 3).contiguous().view(batch, L, -1)
out = self.to_out(out)
return out
class FastAttention(nn.Module):
def __init__(self, dim_heads, nb_features=None, ortho_scaling=0,
generalized_attention=False, kernel_fn=nn.ReLU(inplace=True),
qr_uniform_q=False, no_projection=False):
super().__init__()
nb_features = default(nb_features, int(dim_heads * math.log(dim_heads))
)
self.dim_heads = dim_heads
self.nb_features = nb_features
self.ortho_scaling = ortho_scaling
if not no_projection:
self.create_projection = partial(gaussian_orthogonal_random_matrix,
nb_rows=self.nb_features, nb_columns=dim_heads, scaling=
ortho_scaling, qr_uniform_q=qr_uniform_q)
projection_matrix = self.create_projection()
self.register_buffer('projection_matrix', projection_matrix)
self.generalized_attention = generalized_attention
self.kernel_fn = kernel_fn
self.no_projection = no_projection
@torch.no_grad()
def redraw_projection_matrix(self, device):
projections = self.create_projection(device=device)
self.projection_matrix.copy_(projections)
del projections
def forward(self, q, k, v):
device = q.device
if self.no_projection:
q = q.softmax(dim=-1)
            k = k.softmax(dim=-2)
elif self.generalized_attention:
create_kernel = partial(generalized_kernel, kernel_fn=self.
kernel_fn, projection_matrix=self.projection_matrix, device
=device)
q, k = map(create_kernel, (q, k))
else:
create_kernel = partial(softmax_kernel, projection_matrix=self.
projection_matrix, device=device)
q = create_kernel(q, is_query=True)
k = create_kernel(k, is_query=False)
attn_fn = linear_attention
out = attn_fn(q, k, v)
return out
class SelfAttention(nn.Module):
def __init__(self, dim, k_dim=None, heads=8, local_heads=0,
local_window_size=256, nb_features=None, feature_redraw_interval=
1000, generalized_attention=False, kernel_fn=nn.ReLU(inplace=True),
qr_uniform_q=False, dropout=0.0, no_projection=False):
super().__init__()
assert dim % heads == 0, 'dimension must be divisible by number of heads'
dim_head = dim // heads
inner_dim = dim_head * heads
if k_dim is None:
k_dim = dim
self.fast_attention = FastAttention(dim_head, nb_features,
generalized_attention=generalized_attention, kernel_fn=
kernel_fn, qr_uniform_q=qr_uniform_q, no_projection=no_projection)
self.heads = heads
self.dim = dim
self.to_query = nn.Linear(dim, inner_dim)
self.to_key = nn.Linear(k_dim, inner_dim)
self.to_value = nn.Linear(k_dim, inner_dim)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout, inplace=True)
self.feature_redraw_interval = feature_redraw_interval
self.register_buffer('calls_since_last_redraw', torch.tensor(0))
self.max_tokens = 2 ** 16
def check_redraw_projections(self):
if not self.training:
return
if exists(self.feature_redraw_interval
) and self.calls_since_last_redraw >= self.feature_redraw_interval:
device = get_module_device(self)
fast_attentions = find_modules(self, FastAttention)
for fast_attention in fast_attentions:
fast_attention.redraw_projection_matrix(device)
self.calls_since_last_redraw.zero_()
return
self.calls_since_last_redraw += 1
def _batched_forward(self, q, k, v):
b1, h, n1 = q.shape[:3]
out = torch.empty((b1, h, n1, self.dim // h), dtype=q.dtype, device
=q.device)
shift = self.max_tokens // n1
for i_b in range(0, b1, shift):
start = i_b
end = min(i_b + shift, b1)
out[start:end] = self.fast_attention(q[start:end], k[start:end],
v[start:end])
return out
def forward(self, query, key, value, **kwargs):
self.check_redraw_projections()
b1, n1, _, h = *query.shape, self.heads
b2, n2, _, h = *key.shape, self.heads
q = self.to_query(query)
k = self.to_key(key)
v = self.to_value(value)
q = q.reshape(b1, n1, h, -1).permute(0, 2, 1, 3)
k = k.reshape(b2, n2, h, -1).permute(0, 2, 1, 3)
v = v.reshape(b2, n2, h, -1).permute(0, 2, 1, 3)
if b1 * n1 > self.max_tokens or b2 * n2 > self.max_tokens:
out = self._batched_forward(q, k, v)
else:
out = self.fast_attention(q, k, v)
out = out.permute(0, 2, 1, 3).reshape(b1, n1, -1)
out = self.to_out(out)
return self.dropout(out)
class SequenceWeight(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super(SequenceWeight, self).__init__()
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.scale = 1.0 / math.sqrt(self.d_k)
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, msa):
B, N, L = msa.shape[:3]
msa = msa.permute(0, 2, 1, 3)
tar_seq = msa[:, :, 0].unsqueeze(2)
q = self.to_query(tar_seq).view(B, L, 1, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
k = self.to_key(msa).view(B, L, N, self.heads, self.d_k).permute(0,
1, 3, 4, 2).contiguous()
q = q * self.scale
attn = torch.matmul(q, k)
attn = F.softmax(attn, dim=-1)
return self.dropout(attn)
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, d_ff, p_drop=0.1):
super(FeedForwardLayer, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(p_drop, inplace=True)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, src):
src = self.linear2(self.dropout(F.relu_(self.linear1(src))))
return src
class TiedMultiheadAttention(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super(TiedMultiheadAttention, self).__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.scaling = 1 / math.sqrt(self.d_k)
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim, d_model)
self.to_value = nn.Linear(v_dim, d_model)
self.to_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, query, key, value, return_att=False):
B, N, L = query.shape[:3]
q = self.to_query(query).view(B, N, L, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
k = self.to_key(key).view(B, N, L, self.heads, self.d_k).permute(0,
1, 3, 4, 2).contiguous()
v = self.to_value(value).view(B, N, L, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
scale = self.scaling / math.sqrt(N)
q = q * scale
attention = torch.einsum('bnhik,bnhkj->bhij', q, k)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
attention = attention.unsqueeze(1)
out = torch.matmul(attention, v)
out = out.permute(0, 1, 3, 2, 4).contiguous().view(B, N, L, -1)
out = self.to_out(out)
if return_att:
attention = attention.squeeze(1)
attention = 0.5 * (attention + attention.permute(0, 1, 3, 2))
attention = attention.permute(0, 3, 1, 2)
return out, attention
return out
class SoftTiedMultiheadAttention(nn.Module):
def __init__(self, d_model, heads, k_dim=None, v_dim=None, dropout=0.1):
super(SoftTiedMultiheadAttention, self).__init__()
if k_dim is None:
k_dim = d_model
if v_dim is None:
v_dim = d_model
self.heads = heads
self.d_model = d_model
self.d_k = d_model // heads
self.scale = 1.0 / math.sqrt(self.d_k)
self.seq_weight = SequenceWeight(d_model, heads, dropout=dropout)
self.to_query = nn.Linear(d_model, d_model)
self.to_key = nn.Linear(k_dim, d_model)
self.to_value = nn.Linear(v_dim, d_model)
self.to_out = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, query, key, value, return_att=False):
B, N, L = query.shape[:3]
seq_weight = self.seq_weight(query)
seq_weight = seq_weight.permute(0, 4, 2, 1, 3)
q = self.to_query(query).view(B, N, L, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
k = self.to_key(key).view(B, N, L, self.heads, self.d_k).permute(0,
1, 3, 4, 2).contiguous()
v = self.to_value(value).view(B, N, L, self.heads, self.d_k).permute(
0, 1, 3, 2, 4).contiguous()
q = q * seq_weight
k = k * self.scale
attention = torch.einsum('bnhik,bnhkj->bhij', q, k)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
del q, k, seq_weight
out = torch.einsum('bhij,bnhjk->bnhik', attention, v)
out = out.permute(0, 1, 3, 2, 4).contiguous().view(B, N, L, -1)
out = self.to_out(out)
if return_att:
attention = attention.squeeze(1)
attention = 0.5 * (attention + attention.permute(0, 1, 3, 2))
attention = attention.permute(0, 2, 3, 1)
return out, attention
return out
class AxialEncoderLayerNew(nn.Module):
def __init__(self, d_model, d_ff, heads, p_drop=0.1, performer_opts=
None, use_tied_row=False, use_tied_col=False, use_soft_row=False):
super(AxialEncoderLayerNew, self).__init__()
self.use_performer = performer_opts is not None
self.use_tied_row = use_tied_row
self.use_tied_col = use_tied_col
self.use_soft_row = use_soft_row
if use_tied_row:
self.attn_L = TiedMultiheadAttention(d_model, heads, dropout=p_drop
)
elif use_soft_row:
self.attn_L = SoftTiedMultiheadAttention(d_model, heads,
dropout=p_drop)
elif self.use_performer:
self.attn_L = SelfAttention(dim=d_model, heads=heads, dropout=
p_drop, generalized_attention=True, **performer_opts)
else:
self.attn_L = MultiheadAttention(d_model, heads, dropout=p_drop)
if use_tied_col:
self.attn_N = TiedMultiheadAttention(d_model, heads, dropout=p_drop
)
elif self.use_performer:
self.attn_N = SelfAttention(dim=d_model, heads=heads, dropout=
p_drop, generalized_attention=True, **performer_opts)
else:
self.attn_N = MultiheadAttention(d_model, heads, dropout=p_drop)
self.ff = FeedForwardLayer(d_model, d_ff, p_drop=p_drop)
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.norm3 = LayerNorm(d_model)
self.dropout1 = nn.Dropout(p_drop, inplace=True)
self.dropout2 = nn.Dropout(p_drop, inplace=True)
self.dropout3 = nn.Dropout(p_drop, inplace=True)
def forward(self, input_0):
        # Parameter numbering follows the order in which call() consumes the
        # tensors: norm1, attn_L projections, norm2, attn_N projections,
        # norm3, then the feed-forward weights.
        primals_2 = self.norm1.a_2
        primals_3 = self.norm1.b_2
        primals_4 = self.attn_L.to_query.weight
        primals_5 = self.attn_L.to_query.bias
        primals_6 = self.attn_L.to_key.weight
        primals_7 = self.attn_L.to_key.bias
        primals_8 = self.attn_L.to_value.weight
        primals_9 = self.attn_L.to_value.bias
        primals_10 = self.attn_L.to_out.weight
        primals_11 = self.attn_L.to_out.bias
        primals_12 = self.norm2.a_2
        primals_13 = self.norm2.b_2
        primals_14 = self.attn_N.to_query.weight
        primals_15 = self.attn_N.to_query.bias
        primals_16 = self.attn_N.to_key.weight
        primals_17 = self.attn_N.to_key.bias
        primals_18 = self.attn_N.to_value.weight
        primals_19 = self.attn_N.to_value.bias
        primals_20 = self.attn_N.to_out.weight
        primals_21 = self.attn_N.to_out.bias
        primals_22 = self.norm3.a_2
        primals_23 = self.norm3.b_2
        primals_24 = self.ff.linear1.weight
        primals_25 = self.ff.linear1.bias
        primals_26 = self.ff.linear2.weight
        primals_27 = self.ff.linear2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27])
return output[0]
| wukevin/RoseTTAFold | AxialEncoderLayer | false | 4,580 | [
"MIT"
] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def orthogonal_matrix_chunk(cols, qr_uniform_q=False, device=None):
unstructured_block = torch.randn((cols, cols), device=device)
q, r = torch.linalg.qr(unstructured_block.cpu(), 'reduced')
    q, r = map(lambda t: t.to(device), (q, r))
if qr_uniform_q:
d = torch.diag(r, 0)
q *= d.sign()
return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0,
qr_uniform_q=False, device=None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q,
device=device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim
=1)
elif scaling == 1:
multiplier = math.sqrt(float(nb_columns)) * torch.ones((nb_rows,),
device=device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
def generalized_kernel(data, *, projection_matrix, kernel_fn=nn.ReLU(
inplace=True), kernel_epsilon=0.001, normalize_data=True, device=None):
_b, _h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
data = data_normalizer * data
data = torch.matmul(data, projection_matrix.T)
data = kernel_fn(data) + kernel_epsilon
    return data
def linear_attention(q, k, v):
L = k.shape[-2]
D_inv = 1.0 / torch.einsum('...nd,...d->...n', q, k.mean(dim=-2))
context = torch.einsum('...nd,...ne->...de', k / float(L), v)
del k, v
out = torch.einsum('...n,...nd->...nd', D_inv, q)
del D_inv, q
out = torch.einsum('...nd,...de->...ne', out, context)
return out
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=
True, eps=0.0001, device=None):
b, h, *_ = data.shape
data_normalizer = data.shape[-1] ** -0.25 if normalize_data else 1.0
ratio = projection_matrix.shape[0] ** -0.5
projection = projection_matrix.unsqueeze(0).repeat(h, 1, 1)
projection = projection.unsqueeze(0).repeat(b, 1, 1, 1)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', data_normalizer * data,
projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = diag_data / 2.0 * data_normalizer ** 2
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash, dim=-1, keepdim=True).values) + eps)
else:
data_dash = ratio * (torch.exp(data_dash - diag_data - torch.max(
data_dash)) + eps)
return data_dash.type_as(data)
def find_modules(nn_module, module_type):
    return [module for module in nn_module.modules() if isinstance(module,
        module_type)]
def get_module_device(module):
return next(module.parameters()).device
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super().__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps
)
x = self.a_2 * (x - mean)
x
# ... truncated (>4000 chars) for memory efficiency |
PointerAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7b/c7br6vyvxp4ar3p4eqjyhtqgqf2st76leiw3l2l33377wdvpxflk.py
# Topologically Sorted Source Nodes: [add, f], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# f => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %unsqueeze), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
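# Eager-mode sketch (a hedged reconstruction from the Graph fragment above,
# not compiler output): the fused kernel evaluates the additive-attention
# feature map f = tanh((W1 x + b1) + (W2 y + b2)) in one pass.
def _add_tanh_reference(proj_a, bias_a, proj_b, bias_b):
    return torch.tanh((proj_a + bias_a) + (proj_b + bias_b))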
# kernel path: runs/run_shard_7/inductor_cache/mc/cmczcqleqvynpeitxpozsz7si5a5pzlhofcqqm4cfrnysouzl3tw.py
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1, add_1], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# e_x => exp
# e_x_1 => mul_1
# max_1 => max_1
# sub => sub
# sum_1 => sum_1
# x => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, %primals_9), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 1, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %primals_9), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
triton_poi_fused_add_exp_max_mul_sub_sum_1 = async_compile.triton('triton_poi_fused_add_exp_max_mul_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_max_mul_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_max_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x4 = xindex % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (64 + x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (128 + x4), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (192 + x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp0 * tmp6
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp0 * tmp9
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp13 * tmp1
tmp15 = tmp4 - tmp11
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 * tmp3
tmp18 = tmp14 + tmp17
tmp19 = tmp7 - tmp11
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp20 * tmp6
tmp22 = tmp18 + tmp21
tmp23 = tmp10 - tmp11
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp24 * tmp9
tmp26 = tmp22 + tmp25
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tl.store(out_ptr0 + (x5), tmp11, xmask)
tl.store(out_ptr1 + (x5), tmp28, xmask)
''', device_str='cuda')
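# Eager-mode sketch (hedged, mirroring the Graph fragment above): this kernel
# produces the pieces of a masked, numerically stable softmax over dim 1 --
# the per-row max of the masked logits and the re-masked exp-sum plus eps.
def _masked_softmax_stats_reference(scores, mask, eps=1e-06):
    x = scores * mask
    m = x.max(dim=1, keepdim=True).values
    e = torch.exp(x - m) * mask
    return m, e.sum(dim=1, keepdim=True) + eps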
# kernel path: runs/run_shard_7/inductor_cache/jz/cjz2ly2sfrbducoq4qen3topgalw5xptx3mrfsbmnrixwpb75wx3.py
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1, add_1, softmax], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum, aten.add, aten.div]
# Source node to ATen node mapping:
# add_1 => add_1
# e_x => exp
# e_x_1 => mul_1
# max_1 => max_1
# softmax => div
# sub => sub
# sum_1 => sum_1
# x => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, %primals_9), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 1, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %primals_9), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_1), kwargs = {})
triton_poi_fused_add_div_exp_max_mul_sub_sum_2 = async_compile.triton('triton_poi_fused_add_div_exp_max_mul_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_max_mul_sub_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_exp_max_mul_sub_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 16
x3 = (xindex // 256)
x4 = xindex % 256
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (16*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp1
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x6), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, f], Original ATen: [aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_tanh_0.run(buf2, primals_2, buf1, primals_5, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = reinterpret_tensor(buf1, (4, 1, 4, 4, 4), (64, 256, 16, 4, 1), 0); del buf1 # reuse
buf6 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1, add_1], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum, aten.add]
triton_poi_fused_add_exp_max_mul_sub_sum_1.run(buf4, primals_9, buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1, add_1, softmax], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum, aten.add, aten.div]
triton_poi_fused_add_div_exp_max_mul_sub_sum_2.run(buf4, primals_9, buf5, buf6, buf7, 1024, grid=grid(1024), stream=stream0)
del buf5
del buf6
return (buf7, primals_9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf2, buf4, primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn.functional as F
def masked_softmax(x, m=None, dim=-1):
"""
Softmax with mask
    :param x: input tensor of logits
    :param m: optional mask tensor, broadcastable to ``x``; masked-out positions should be 0
    :param dim: dimension along which to take the softmax
    :return: masked softmax of ``x`` along ``dim``
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
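# Illustrative example (added for this write-up, not part of the original
# repo): masked positions receive zero probability and the surviving entries
# renormalize along `dim`.
def _demo_masked_softmax():
    x = torch.tensor([[1.0, 2.0, 3.0]])
    m = torch.tensor([[1.0, 1.0, 0.0]])  # third position is masked out
    p = masked_softmax(x, m, dim=1)
    # p is approximately [[0.2689, 0.7311, 0.0]] and each row sums to ~1.
    return p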
class PointerAttention(torch.nn.Module):
"""
attention mechanism in pointer network
Args:
- input_size: The number of features in Hr
- hidden_size: The number of features in the hidden layer
Inputs:
Hr(context_len, batch, hidden_size * num_directions): question-aware context representation
        Hk_last(batch, hidden_size): the last hidden output of the previous time step
    Outputs:
        beta(batch, context_len): attention weights over the context positions
"""
def __init__(self, input_size, hidden_size):
super(PointerAttention, self).__init__()
self.linear_wr = torch.nn.Linear(input_size, hidden_size)
self.linear_wa = torch.nn.Linear(hidden_size, hidden_size)
self.linear_wf = torch.nn.Linear(hidden_size, 1)
def forward(self, Hr, Hr_mask, Hk_pre):
wr_hr = self.linear_wr(Hr)
wa_ha = self.linear_wa(Hk_pre).unsqueeze(0)
f = F.tanh(wr_hr + wa_ha)
beta_tmp = self.linear_wf(f).squeeze(2).transpose(0, 1)
beta = masked_softmax(beta_tmp, m=Hr_mask, dim=1)
return beta
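# Illustrative usage (added for this write-up; the sizes are hypothetical
# but follow the shapes documented in the class docstring):
def _demo_pointer_attention():
    context_len, batch, hidden = 5, 2, 8
    attn = PointerAttention(input_size=hidden, hidden_size=hidden)
    Hr = torch.rand(context_len, batch, hidden)
    Hr_mask = torch.ones(batch, context_len)
    Hk_pre = torch.rand(batch, hidden)
    beta = attn(Hr, Hr_mask, Hk_pre)
    # beta: (batch, context_len) attention weights, rows summing to ~1.
    return beta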
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
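# Note (added for this write-up): the kernel below fuses the reduction half
# of masked_softmax. For each output position it unrolls the size-4 softmax
# axis (the stride-64 loads) to compute the running max of mask * logits
# (out_ptr0) and the stabilized denominator sum(exp(x*m - max) * m) + 1e-06
# (out_ptr1), so the follow-up kernel only has to divide.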
@triton.jit
def triton_poi_fused_add_exp_max_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (64 + x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (128 + x4), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (192 + x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp0 * tmp3
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp0 * tmp6
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp0 * tmp9
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = tmp2 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp13 * tmp1
tmp15 = tmp4 - tmp11
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 * tmp3
tmp18 = tmp14 + tmp17
tmp19 = tmp7 - tmp11
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp20 * tmp6
tmp22 = tmp18 + tmp21
tmp23 = tmp10 - tmp11
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp24 * tmp9
tmp26 = tmp22 + tmp25
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tl.store(out_ptr0 + x5, tmp11, xmask)
tl.store(out_ptr1 + x5, tmp28, xmask)
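# Note (added for this write-up): the kernel below is the normalization half.
# It recomputes exp(x*m - max) * m elementwise and divides by the denominator
# produced above, broadcasting the reduced (max, sum) statistics over the
# extra output dimension introduced by the mask's shape.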
@triton.jit
def triton_poi_fused_add_div_exp_max_mul_sub_sum_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 16
x3 = xindex // 256
x4 = xindex % 256
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr3 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp1
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x6, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
)
del buf0
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(256)](buf2, primals_2, buf1,
primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_5
buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_8
buf5 = reinterpret_tensor(buf1, (4, 1, 4, 4, 4), (64, 256, 16, 4, 1), 0
)
del buf1
buf6 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 16, 4, 1),
torch.float32)
triton_poi_fused_add_exp_max_mul_sub_sum_1[grid(256)](buf4,
primals_9, buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_add_div_exp_max_mul_sub_sum_2[grid(1024)](buf4,
primals_9, buf5, buf6, buf7, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
del buf5
del buf6
return buf7, primals_9, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), buf2, buf4, primals_7
def masked_softmax(x, m=None, dim=-1):
"""
Softmax with mask
    :param x: input tensor of logits
    :param m: optional mask tensor, broadcastable to ``x``; masked-out positions should be 0
    :param dim: dimension along which to take the softmax
    :return: masked softmax of ``x`` along ``dim``
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class PointerAttentionNew(torch.nn.Module):
"""
attention mechanism in pointer network
Args:
- input_size: The number of features in Hr
- hidden_size: The number of features in the hidden layer
Inputs:
Hr(context_len, batch, hidden_size * num_directions): question-aware context representation
        Hk_last(batch, hidden_size): the last hidden output of the previous time step
    Outputs:
        beta(batch, context_len): attention weights over the context positions
"""
def __init__(self, input_size, hidden_size):
super(PointerAttentionNew, self).__init__()
self.linear_wr = torch.nn.Linear(input_size, hidden_size)
self.linear_wa = torch.nn.Linear(hidden_size, hidden_size)
self.linear_wf = torch.nn.Linear(hidden_size, 1)
def forward(self, input_0, input_1, input_2):
primals_1 = self.linear_wr.weight
primals_2 = self.linear_wr.bias
primals_4 = self.linear_wa.weight
primals_5 = self.linear_wa.bias
primals_7 = self.linear_wf.weight
primals_8 = self.linear_wf.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
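# Illustrative driver (added for this write-up): call() hard-codes CUDA and
# asserts the exact 4x4x4x4 strides above, so the compiled module is only a
# drop-in for the eager version at these specialized shapes.
def _demo_pointer_attention_new():
    attn = PointerAttentionNew(input_size=4, hidden_size=4).cuda()
    Hr = torch.rand(4, 4, 4, 4, device='cuda')
    Hr_mask = torch.rand(4, 4, 4, 4, device='cuda')
    Hk_pre = torch.rand(4, 4, 4, 4, device='cuda')
    return attn(Hr, Hr_mask, Hk_pre)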
| xdong73S/Match_LSTM_v2.0 | PointerAttention | false | 4,581 | [
"MIT"
] | 0 | dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | https://github.com/xdong73S/Match_LSTM_v2.0/tree/dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | import torch
import torch.utils.data
import torch.nn.functional as F
def masked_softmax(x, m=None, dim=-1):
"""
Softmax with mask
:param x:
:param m:
:param dim:
:return:
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class Model(torch.nn.Module):
"""
attention mechanism in pointer network
Args:
- input_size: The number of features in Hr
- hidden_size: The number of features in the hidden layer
Inputs:
Hr(context_len, batch, hidden_size * num_directions): question-aware context representation
        Hk_last(batch, hidden_size): the last hidden output of the previous time step
    Outputs:
        beta(batch, context_len): attention weights over the context positions
"""
def __init__(self, input_size, hidden_size):
super().__init__()
self.linear_wr = torch.nn.Linear(input_size, hidden_size)
self.linear_wa = torch.nn.Linear(hidden_size, hidden_size)
self.linear_wf = torch.nn.Linear(hidden_size, 1)
def forward(self, Hr, Hr_mask, Hk_pre):
wr_hr = self.linear_wr(Hr)
wa_ha = self.linear_wa(Hk_pre).unsqueeze(0)
f = F.tanh(wr_hr + wa_ha)
beta_tmp = self.linear_wf(f).squeeze(2).transpose(0, 1)
beta = masked_softmax(beta_tmp, m=Hr_mask, dim=1)
return beta
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Enrichment | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dn/cdn6cpjk3yoqht7jm6pdvdb52rl2cfnpkjmaufbqmql33n7ucc5f.py
# Topologically Sorted Source Nodes: [conv2d, o], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# o => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=6] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sf/csfsfbge4st53sowi7fgsjbvox5ryxsfq2lsqrdk4br6sq7dqgyg.py
# Topologically Sorted Source Nodes: [conv2d_1, o1, conv2d_2, o2, conv2d_3, o3, conv2d_4, o4, add, add_1, add_2, out], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# conv2d_1 => convolution_1
# conv2d_2 => convolution_2
# conv2d_3 => convolution_3
# conv2d_4 => convolution_4
# o1 => relu_1
# o2 => relu_2
# o3 => relu_3
# o4 => relu_4
# out => add_3
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [2, 2], [2, 2], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_6, %primals_7, [1, 1], [4, 4], [4, 4], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_8, %primals_9, [1, 1], [6, 6], [6, 6], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_10, %primals_11, [1, 1], [8, 8], [8, 8], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %relu_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %relu_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %relu_3), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %relu_4), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*i1', 11: '*i1', 12: '*i1', 13: '*i1', 14: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x3), None)
tmp2 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x3), None)
tmp8 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x3), None)
tmp13 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr7 + (x3), None)
tmp18 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp0 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp11 = tmp6 + tmp10
tmp14 = tmp12 + tmp13
tmp15 = triton_helpers.maximum(tmp4, tmp14)
tmp16 = tmp11 + tmp15
tmp19 = tmp17 + tmp18
tmp20 = triton_helpers.maximum(tmp4, tmp19)
tmp21 = tmp16 + tmp20
tmp22 = 0.0
tmp23 = tmp20 <= tmp22
tmp24 = tmp15 <= tmp22
tmp25 = tmp10 <= tmp22
tmp26 = tmp5 <= tmp22
tl.store(out_ptr0 + (x3), tmp21, None)
tl.store(out_ptr1 + (x3), tmp23, None)
tl.store(out_ptr2 + (x3), tmp24, None)
tl.store(out_ptr3 + (x3), tmp25, None)
tl.store(out_ptr4 + (x3), tmp26, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (32, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, o], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 2048, grid=grid(2048), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(2, 2), dilation=(2, 2), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf1, primals_8, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 4, 4), (512, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf1, primals_10, stride=(1, 1), padding=(8, 8), dilation=(8, 8), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 4, 4), (512, 16, 4, 1))
buf6 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf8 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf9 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, o1, conv2d_2, o2, conv2d_3, o3, conv2d_4, o4, add, add_1, add_2, out], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_threshold_backward_1.run(buf1, buf2, primals_5, buf3, primals_7, buf4, primals_9, buf5, primals_11, buf6, buf7, buf8, buf9, buf10, 2048, grid=grid(2048), stream=stream0)
del buf2
del buf3
del buf4
del buf5
del primals_11
del primals_5
del primals_7
del primals_9
return (buf6, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf7, buf8, buf9, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Enrichment(nn.Module):
def __init__(self, c_in, rate=2):
super(Enrichment, self).__init__()
self.rate = rate
self.relu = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(c_in, 32, 3, stride=1, padding=1)
dilation = self.rate * 1 if self.rate >= 1 else 1
self.conv1 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
dilation = self.rate * 2 if self.rate >= 1 else 1
self.conv2 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
dilation = self.rate * 3 if self.rate >= 1 else 1
self.conv3 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
dilation = self.rate * 4 if self.rate >= 1 else 1
self.conv4 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
self._initialize_weights()
def forward(self, x):
o = self.relu(self.conv(x))
o1 = self.relu(self.conv1(o))
o2 = self.relu(self.conv2(o))
o3 = self.relu(self.conv3(o))
o4 = self.relu(self.conv4(o))
out = o + o1 + o2 + o3 + o4
return out
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
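# Worked note (added for this write-up): with rate=2 the four parallel 3x3
# branches use dilations 2/4/6/8, i.e. receptive fields of 5/9/13/17 pixels,
# and padding == dilation keeps every branch at the input's spatial size so
# the five ReLU maps can be summed elementwise.
def _demo_enrichment():
    m = Enrichment(c_in=4, rate=2)
    y = m(torch.rand(2, 4, 32, 32))
    # y: (2, 32, 32, 32) -- 32 channels, spatial size preserved.
    return y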
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c_in': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
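# Note (added for this write-up): the kernel below fuses the conv bias-add
# with ReLU, updating the convolution output in place; x1 picks out the
# 32-way output-channel index (16 = 4x4 spatial positions per channel).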
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
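# Note (added for this write-up): the kernel below fuses bias-add + ReLU for
# all four dilated branches, accumulates o + o1 + o2 + o3 + o4 into out_ptr0,
# and emits four boolean (activation <= 0) masks that the autograd
# threshold_backward pass reuses.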
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp2 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x3, None)
tmp8 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x3, None)
tmp13 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr7 + x3, None)
tmp18 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp0 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp4, tmp9)
tmp11 = tmp6 + tmp10
tmp14 = tmp12 + tmp13
tmp15 = triton_helpers.maximum(tmp4, tmp14)
tmp16 = tmp11 + tmp15
tmp19 = tmp17 + tmp18
tmp20 = triton_helpers.maximum(tmp4, tmp19)
tmp21 = tmp16 + tmp20
tmp22 = 0.0
tmp23 = tmp20 <= tmp22
tmp24 = tmp15 <= tmp22
tmp25 = tmp10 <= tmp22
tmp26 = tmp5 <= tmp22
tl.store(out_ptr0 + x3, tmp21, None)
tl.store(out_ptr1 + x3, tmp23, None)
tl.store(out_ptr2 + x3, tmp24, None)
tl.store(out_ptr3 + x3, tmp25, None)
tl.store(out_ptr4 + x3, tmp26, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(2048)](buf1, primals_2,
2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(2, 2), dilation=(2, 2), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 4, 4), (512, 16, 4, 1))
buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1, 1),
padding=(4, 4), dilation=(4, 4), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 4, 4), (512, 16, 4, 1))
buf4 = extern_kernels.convolution(buf1, primals_8, stride=(1, 1),
padding=(6, 6), dilation=(6, 6), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 4, 4), (512, 16, 4, 1))
buf5 = extern_kernels.convolution(buf1, primals_10, stride=(1, 1),
padding=(8, 8), dilation=(8, 8), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 4, 4), (512, 16, 4, 1))
buf6 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32
)
buf7 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf8 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf9 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(2048)](
buf1, buf2, primals_5, buf3, primals_7, buf4, primals_9, buf5,
primals_11, buf6, buf7, buf8, buf9, buf10, 2048, XBLOCK=256,
num_warps=4, num_stages=1)
del buf2
del buf3
del buf4
del buf5
del primals_11
del primals_5
del primals_7
del primals_9
return (buf6, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf1, buf7, buf8, buf9, buf10)
class EnrichmentNew(nn.Module):
def __init__(self, c_in, rate=2):
super(EnrichmentNew, self).__init__()
self.rate = rate
self.relu = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(c_in, 32, 3, stride=1, padding=1)
dilation = self.rate * 1 if self.rate >= 1 else 1
self.conv1 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
dilation = self.rate * 2 if self.rate >= 1 else 1
self.conv2 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
dilation = self.rate * 3 if self.rate >= 1 else 1
self.conv3 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
dilation = self.rate * 4 if self.rate >= 1 else 1
self.conv4 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.conv1.weight
primals_5 = self.conv1.bias
primals_6 = self.conv2.weight
primals_7 = self.conv2.bias
primals_8 = self.conv3.weight
primals_9 = self.conv3.bias
primals_10 = self.conv4.weight
primals_11 = self.conv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| xavysp/TIN_xsp | Enrichment | false | 4,582 | [
"MIT"
] | 0 | 9f68e03923f637f4d4ef885694dfc3aaaaad6cea | https://github.com/xavysp/TIN_xsp/tree/9f68e03923f637f4d4ef885694dfc3aaaaad6cea | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, c_in, rate=2):
super().__init__()
self.rate = rate
self.relu = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(c_in, 32, 3, stride=1, padding=1)
dilation = self.rate * 1 if self.rate >= 1 else 1
self.conv1 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
dilation = self.rate * 2 if self.rate >= 1 else 1
self.conv2 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
dilation = self.rate * 3 if self.rate >= 1 else 1
self.conv3 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
dilation = self.rate * 4 if self.rate >= 1 else 1
self.conv4 = nn.Conv2d(32, 32, 3, stride=1, dilation=dilation,
padding=dilation)
self._initialize_weights()
def forward(self, x):
o = self.relu(self.conv(x))
o1 = self.relu(self.conv1(o))
o2 = self.relu(self.conv2(o))
o3 = self.relu(self.conv3(o))
o4 = self.relu(self.conv4(o))
out = o + o1 + o2 + o3 + o4
return out
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
PredLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/te/ctez6v53ld4k45ykiwq5ndlpz3jrny2atstdxadog7gcnit65abw.py
# Topologically Sorted Source Nodes: [_y, y], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# _y => mul
# y => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_y, y], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def module_test_print(var_input, var_inmed, var_ouput):
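    # Debug hook that walks the input / intermediate / output variable dicts.
    # The bare `None` statements below are no-op placeholders; the original
    # reporting logic (presumably print calls) is elided here.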
for var in (var_input, var_inmed, var_ouput):
None
for key, value in var.items():
None
None
class PredLayer(nn.Module):
def __init__(self, module_test=False):
super(PredLayer, self).__init__()
self.module_test = module_test
def forward(self, u, v):
"""
input:
u: (batch_size, 1, dim_user)
v: (batch_size, 1, dim_item)
output:
y: (batch_size, 1)
"""
_y = torch.mul(u, v)
y = torch.sum(_y, dim=-1)
if self.module_test:
var_input = ['u', 'v']
var_inmed = ['_y']
var_ouput = ['y']
locals_cap = locals()
module_test_print(dict(zip(var_input, [eval(v, locals_cap) for
v in var_input])), dict(zip(var_inmed, [eval(v, locals_cap) for
v in var_inmed])), dict(zip(var_ouput, [eval(v, locals_cap) for
v in var_ouput])))
return y
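# Illustrative check (added for this write-up): the layer is a batched dot
# product over the last dimension, so it agrees with an equivalent einsum.
def _demo_pred_layer():
    layer = PredLayer()
    u, v = torch.rand(3, 1, 8), torch.rand(3, 1, 8)
    y = layer(u, v)  # shape (3, 1)
    assert torch.allclose(y, torch.einsum('bld,bld->bl', u, v))
    return y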
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
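# Note (added for this write-up): the kernel below fuses the elementwise
# product with the sum over the last (size-4) dimension, unrolled into four
# contiguous loads per output element.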
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
def module_test_print(var_input, var_inmed, var_ouput):
for var in (var_input, var_inmed, var_ouput):
None
for key, value in var.items():
None
None
class PredLayerNew(nn.Module):
def __init__(self, module_test=False):
super(PredLayerNew, self).__init__()
self.module_test = module_test
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| xlx0010/HGNN | PredLayer | false | 4,583 | [
"MIT"
] | 0 | 219352405db021c1f435f3aa55961adcf2a6df19 | https://github.com/xlx0010/HGNN/tree/219352405db021c1f435f3aa55961adcf2a6df19 | import torch
import torch.nn as nn
def module_test_print(var_input, var_inmed, var_ouput):
for var in (var_input, var_inmed, var_ouput):
None
for key, value in var.items():
None
None
class Model(nn.Module):
def __init__(self, module_test=False):
super().__init__()
self.module_test = module_test
def forward(self, u, v):
"""
input:
u: (batch_size, 1, dim_user)
v: (batch_size, 1, dim_item)
output:
y: (batch_size, 1)
"""
_y = torch.mul(u, v)
y = torch.sum(_y, dim=-1)
if self.module_test:
var_input = ['u', 'v']
var_inmed = ['_y']
var_ouput = ['y']
locals_cap = locals()
module_test_print(dict(zip(var_input, [eval(v, locals_cap) for
v in var_input])), dict(zip(var_inmed, [eval(v, locals_cap) for
v in var_inmed])), dict(zip(var_ouput, [eval(v, locals_cap) for
v in var_ouput])))
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
FocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/uv/cuvpdztljgdb5yhe3d2yn6ygnf7xg4cnyqeufpxtspmbboqwcx6k.py
# Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, mean], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# exp => exp
# exp_1 => exp_1
# exp_2 => exp_3
# invprobs => abs_1, exp_2, full_default, log1p, minimum, neg_4, sub_3
# log => log
# loss => add_2
# loss_1 => mul_4
# max_val => clamp_min
# mean => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# neg => neg
# neg_1 => neg_1
# neg_2 => neg_2
# neg_3 => neg_3
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_3, %sub_2), kwargs = {})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %mul_2), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_2,), kwargs = {})
# %neg_4 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_4,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 4), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%neg, 0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %clamp_min), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%clamp_min,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg_2, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, %exp_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %log), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, %add_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_4,), kwargs = {})
triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0 = async_compile.triton('triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = -tmp0
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 - tmp5
tmp7 = tmp1 * tmp6
tmp8 = 0.0
tmp9 = triton_helpers.minimum(tmp8, tmp7)
tmp10 = tl_math.abs(tmp7)
tmp11 = -tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = libdevice.log1p(tmp12)
tmp14 = tmp9 - tmp13
tmp15 = 4.0
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp0 * tmp2
tmp19 = tmp0 - tmp18
tmp20 = triton_helpers.maximum(tmp1, tmp8)
tmp21 = tmp19 + tmp20
tmp22 = -tmp20
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp1 - tmp20
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp21 + tmp27
tmp29 = tmp17 * tmp28
tmp30 = tl.broadcast_to(tmp29, [RBLOCK])
tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
tmp33 = 256.0
tmp34 = tmp32 / tmp33
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp34, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, mean], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
def __init__(self, gamma):
super().__init__()
self.gamma = gamma
def forward(self, input, target):
if not target.size() == input.size():
raise ValueError(
'Target size ({}) must be the same as input size ({})'.
format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
        loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
return loss.mean()
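# Added sketch (ours, not from the original repo): a quick reference check
# that the hand-rolled stable expression in forward() matches torch's
# built-in binary_cross_entropy_with_logits before focal weighting. The
# identity used is x - x*t + m + log(exp(-m) + exp(-x - m)) with
# m = max(-x, 0), which simplifies to x - x*t + log(1 + exp(-x)).
def _check_stable_bce(x, t):
    max_val = (-x).clamp(min=0)
    stable = x - x * t + max_val + ((-max_val).exp() + (-x - max_val).exp()).log()
    builtin = F.binary_cross_entropy_with_logits(x, t, reduction='none')
    assert torch.allclose(stable, builtin, atol=1e-06)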
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'gamma': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = -tmp0
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 - tmp5
tmp7 = tmp1 * tmp6
tmp8 = 0.0
tmp9 = triton_helpers.minimum(tmp8, tmp7)
tmp10 = tl_math.abs(tmp7)
tmp11 = -tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = libdevice.log1p(tmp12)
tmp14 = tmp9 - tmp13
tmp15 = 4.0
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp0 * tmp2
tmp19 = tmp0 - tmp18
tmp20 = triton_helpers.maximum(tmp1, tmp8)
tmp21 = tmp19 + tmp20
tmp22 = -tmp20
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp1 - tmp20
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp21 + tmp27
tmp29 = tmp17 * tmp28
tmp30 = tl.broadcast_to(tmp29, [RBLOCK])
tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
tmp33 = 256.0
tmp34 = tmp32 / tmp33
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp34, None)
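# Added note (ours, not Inductor output): the kernel above fuses the entire
# focal loss into one persistent reduction over the 256 elements. tmp17 is
# the focal modulator exp(gamma * logsigmoid(-x * (2t - 1))) with gamma
# hard-coded as 4.0 (tmp15), built from the stable min/abs/log1p form of
# logsigmoid; tmp28 is the stable BCE-with-logits term; tmp34 is the mean of
# their product. An eager-mode equivalent for cross-checking (our helper):
def _focal_reference(logits, targets, gamma=4.0):
    import torch.nn.functional as F
    max_val = (-logits).clamp(min=0)
    bce = logits - logits * targets + max_val + ((-max_val).exp() + (-logits - max_val).exp()).log()
    modulator = (F.logsigmoid(-logits * (targets * 2.0 - 1.0)) * gamma).exp()
    return (modulator * bce).mean()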
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0[
grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class FocalLossNew(nn.Module):
def __init__(self, gamma):
super().__init__()
self.gamma = gamma
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| xkp793003821/kaggle-tgs-salt | FocalLoss | false | 4,584 | [
"MIT"
] | 0 | 4acd7f8b6aff914e2c8558677d6dac8b5ddc1f30 | https://github.com/xkp793003821/kaggle-tgs-salt/tree/4acd7f8b6aff914e2c8558677d6dac8b5ddc1f30 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, gamma):
super().__init__()
self.gamma = gamma
def forward(self, input, target):
if not target.size() == input.size():
raise ValueError(
'Target size ({}) must be the same as input size ({})'.
format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
        loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MyGlobalAvgPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/h3/ch3l34kqqlue6keu2k5zyuitwqh5ph3eypbf57e4juzyb5tqpeva.py
# Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# mean_1 => mean_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [3], True), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [2], True), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + (x0), tmp36, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, mean_1], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
class MyGlobalAvgPool2d(nn.Module):
def __init__(self, keep_dim=True):
super(MyGlobalAvgPool2d, self).__init__()
self.keep_dim = keep_dim
def forward(self, x):
return x.mean(3, keepdim=self.keep_dim).mean(2, keepdim=self.keep_dim)
def __repr__(self):
return 'MyGlobalAvgPool2d(keep_dim=%s)' % self.keep_dim
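# Added sketch (ours): the chained mean(3).mean(2) above is equivalent to one
# fused spatial mean, since every row being averaged has the same length.
def _check_global_avg_pool(x, keep_dim=True):
    chained = x.mean(3, keepdim=keep_dim).mean(2, keepdim=keep_dim)
    fused = x.mean(dim=(2, 3), keepdim=keep_dim)
    assert torch.allclose(chained, fused)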
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + x0, tmp36, xmask)
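# Added note (ours, not Inductor output): each of the 16 programs above
# reduces one (batch, channel) slice. The 4x4 spatial window is fully
# unrolled into 16 scalar loads; rows are averaged first (tmp8, tmp16,
# tmp25, tmp34) and the four row means are averaged again, mirroring
# mean(3).mean(2) exactly rather than a single 16-element mean (the value is
# the same here because every row has equal length).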
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class MyGlobalAvgPool2dNew(nn.Module):
def __init__(self, keep_dim=True):
super(MyGlobalAvgPool2dNew, self).__init__()
self.keep_dim = keep_dim
def __repr__(self):
return 'MyGlobalAvgPool2d(keep_dim=%s)' % self.keep_dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| xmyqsh/once-for-all | MyGlobalAvgPool2d | false | 4,585 | [
"MIT"
] | 0 | 0bca1778b106d33460fc8d0f7d7e6ca4e1e937d9 | https://github.com/xmyqsh/once-for-all/tree/0bca1778b106d33460fc8d0f7d7e6ca4e1e937d9 | import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.parallel
import torch.optim
class Model(nn.Module):
def __init__(self, keep_dim=True):
super().__init__()
self.keep_dim = keep_dim
def forward(self, x):
return x.mean(3, keepdim=self.keep_dim).mean(2, keepdim=self.keep_dim)
def __repr__(self):
return 'MyGlobalAvgPool2d(keep_dim=%s)' % self.keep_dim
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MixedLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/py/cpyoafwcjvfyxxpmlgdyskykz6gyhyfkuluphlyqm4rwwjuqw565.py
# Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, mean, mul_5, mul_6, intersection, mul_7, add_3, sum_2, sum_3, add_4, add_5, truediv, log_1, loss_2, mean_1], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.mean, aten.sum, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# exp => exp
# exp_1 => exp_1
# exp_2 => exp_3
# intersection => sum_1
# invprobs => abs_1, exp_2, full_default, log1p, minimum, neg_4, sub_3
# log => log
# log_1 => log_1
# loss => add_2
# loss_1 => mul_4
# loss_2 => sub_4
# max_val => clamp_min
# mean => mean
# mean_1 => mean_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# neg => neg
# neg_1 => neg_1
# neg_2 => neg_2
# neg_3 => neg_3
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sum_2 => sum_2
# sum_3 => sum_3
# truediv => div
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_3, %sub_2), kwargs = {})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %mul_2), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_2,), kwargs = {})
# %neg_4 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_4,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 4), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%neg, 0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %clamp_min), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%clamp_min,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg_2, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, %exp_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %log), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, %add_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_4,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 4), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_6,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, 1.0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_3, %add_5), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, %log_1), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_4,), kwargs = {})
triton_per_fused_add_clamp_div_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_div_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_div_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_div_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = -tmp0
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 - tmp5
tmp7 = tmp1 * tmp6
tmp8 = 0.0
tmp9 = triton_helpers.minimum(tmp8, tmp7)
tmp10 = tl_math.abs(tmp7)
tmp11 = -tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = libdevice.log1p(tmp12)
tmp14 = tmp9 - tmp13
tmp15 = 4.0
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp0 * tmp2
tmp19 = tmp0 - tmp18
tmp20 = triton_helpers.maximum(tmp1, tmp8)
tmp21 = tmp19 + tmp20
tmp22 = -tmp20
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp1 - tmp20
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp21 + tmp27
tmp29 = tmp17 * tmp28
tmp30 = tl.broadcast_to(tmp29, [RBLOCK])
tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
tmp33 = tl.sigmoid(tmp0)
tmp34 = tmp33 * tmp2
tmp35 = tl.broadcast_to(tmp34, [RBLOCK])
tmp37 = triton_helpers.promote_to_tensor(tl.sum(tmp35, 0))
tmp38 = tl.broadcast_to(tmp33, [RBLOCK])
tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0))
tmp41 = tl.broadcast_to(tmp2, [RBLOCK])
tmp43 = triton_helpers.promote_to_tensor(tl.sum(tmp41, 0))
tmp44 = 256.0
tmp45 = tmp32 / tmp44
tmp46 = tmp45 * tmp15
tmp47 = tmp37 * tmp3
tmp48 = tmp47 + tmp5
tmp49 = tmp40 + tmp43
tmp50 = tmp49 + tmp5
tmp51 = tmp48 / tmp50
tmp52 = tl_math.log(tmp51)
tmp53 = tmp46 - tmp52
tmp54 = tmp53 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp54, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, mean, mul_5, mul_6, intersection, mul_7, add_3, sum_2, sum_3, add_4, add_5, truediv, log_1, loss_2, mean_1], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.mean, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_div_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0.run(buf4, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def dice_loss(input, target):
input = torch.sigmoid(input)
smooth = 1.0
iflat = input.view(-1)
tflat = target.view(-1)
intersection = (iflat * tflat).sum()
return (2.0 * intersection + smooth) / (iflat.sum() + tflat.sum() + smooth)
class FocalLoss(nn.Module):
def __init__(self, gamma):
super().__init__()
self.gamma = gamma
def forward(self, input, target):
if not target.size() == input.size():
raise ValueError(
'Target size ({}) must be the same as input size ({})'.
format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
        loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
return loss.mean()
class MixedLoss(nn.Module):
def __init__(self, alpha, gamma):
super().__init__()
self.alpha = alpha
self.focal = FocalLoss(gamma)
def forward(self, input, target):
        loss = self.alpha * self.focal(input, target) - torch.log(dice_loss(input, target))
return loss.mean()
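# Note (added): dice_loss above actually returns the Dice *score* in (0, 1],
# so -log(score) is a non-negative penalty that vanishes only at perfect
# overlap; the smooth=1.0 terms keep the ratio finite for empty masks.
# Usage sketch (shapes illustrative, not from the repo):
#   criterion = MixedLoss(alpha=10.0, gamma=2.0)
#   loss = criterion(logits, masks)  # logits, masks: (N, 1, H, W)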
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'alpha': 4, 'gamma': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_div_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = -tmp0
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 - tmp5
tmp7 = tmp1 * tmp6
tmp8 = 0.0
tmp9 = triton_helpers.minimum(tmp8, tmp7)
tmp10 = tl_math.abs(tmp7)
tmp11 = -tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = libdevice.log1p(tmp12)
tmp14 = tmp9 - tmp13
tmp15 = 4.0
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp0 * tmp2
tmp19 = tmp0 - tmp18
tmp20 = triton_helpers.maximum(tmp1, tmp8)
tmp21 = tmp19 + tmp20
tmp22 = -tmp20
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp1 - tmp20
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tl_math.log(tmp26)
tmp28 = tmp21 + tmp27
tmp29 = tmp17 * tmp28
tmp30 = tl.broadcast_to(tmp29, [RBLOCK])
tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
tmp33 = tl.sigmoid(tmp0)
tmp34 = tmp33 * tmp2
tmp35 = tl.broadcast_to(tmp34, [RBLOCK])
tmp37 = triton_helpers.promote_to_tensor(tl.sum(tmp35, 0))
tmp38 = tl.broadcast_to(tmp33, [RBLOCK])
tmp40 = triton_helpers.promote_to_tensor(tl.sum(tmp38, 0))
tmp41 = tl.broadcast_to(tmp2, [RBLOCK])
tmp43 = triton_helpers.promote_to_tensor(tl.sum(tmp41, 0))
tmp44 = 256.0
tmp45 = tmp32 / tmp44
tmp46 = tmp45 * tmp15
tmp47 = tmp37 * tmp3
tmp48 = tmp47 + tmp5
tmp49 = tmp40 + tmp43
tmp50 = tmp49 + tmp5
tmp51 = tmp48 / tmp50
tmp52 = tl_math.log(tmp51)
tmp53 = tmp46 - tmp52
tmp54 = tmp53 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp54, None)
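# Added note (ours, not Inductor output): unlike the standalone FocalLoss
# kernel, this fusion carries four reductions through a single pass over the
# 256 elements: the focal-weighted BCE sum (tmp32), the sigmoid(x) * t
# intersection (tmp37), and the separate sums of sigmoid(x) (tmp40) and t
# (tmp43). The scalar epilogue then assembles
#   alpha * focal_mean - log((2*I + 1) / (S_pred + S_target + 1))
# entirely in registers, so the whole MixedLoss costs one kernel launch.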
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_clamp_div_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0[
grid(1)](buf4, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf4,
def dice_loss(input, target):
input = torch.sigmoid(input)
smooth = 1.0
iflat = input.view(-1)
tflat = target.view(-1)
intersection = (iflat * tflat).sum()
return (2.0 * intersection + smooth) / (iflat.sum() + tflat.sum() + smooth)
class FocalLoss(nn.Module):
def __init__(self, gamma):
super().__init__()
self.gamma = gamma
def forward(self, input, target):
if not target.size() == input.size():
raise ValueError(
'Target size ({}) must be the same as input size ({})'.
format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
        loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
return loss.mean()
class MixedLossNew(nn.Module):
def __init__(self, alpha, gamma):
super().__init__()
self.alpha = alpha
self.focal = FocalLoss(gamma)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| xkp793003821/kaggle-tgs-salt | MixedLoss | false | 4,586 | [
"MIT"
] | 0 | 4acd7f8b6aff914e2c8558677d6dac8b5ddc1f30 | https://github.com/xkp793003821/kaggle-tgs-salt/tree/4acd7f8b6aff914e2c8558677d6dac8b5ddc1f30 | import torch
import torch.nn as nn
import torch.nn.functional as F
def dice_loss(input, target):
input = torch.sigmoid(input)
smooth = 1.0
iflat = input.view(-1)
tflat = target.view(-1)
intersection = (iflat * tflat).sum()
return (2.0 * intersection + smooth) / (iflat.sum() + tflat.sum() + smooth)
class FocalLoss(nn.Module):
def __init__(self, gamma):
super().__init__()
self.gamma = gamma
def forward(self, input, target):
if not target.size() == input.size():
raise ValueError(
'Target size ({}) must be the same as input size ({})'.
format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
        loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
return loss.mean()
class Model(nn.Module):
def __init__(self, alpha, gamma):
super().__init__()
self.alpha = alpha
self.focal = FocalLoss(gamma)
def forward(self, input, target):
        loss = self.alpha * self.focal(input, target) - torch.log(dice_loss(input, target))
return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/na/cna7lsxfq3e22tnqxr3tryqlowbf6fnexzbroh4bixhioq4dmjoh.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py
# Topologically Sorted Source Nodes: [results], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# results => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py
# Topologically Sorted Source Nodes: [results], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# results => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [product_1], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_1, reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 4), 0), out=buf2)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [results], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [results], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
del buf3
return (buf4, buf0, buf4, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import init
from torch.nn.parameter import Parameter
class SelfAttention(torch.nn.Module):
def __init__(self, wv_dim: 'int', maxlen: 'int'):
super(SelfAttention, self).__init__()
self.wv_dim = wv_dim
self.maxlen = maxlen
self.M = Parameter(torch.empty(size=(wv_dim, wv_dim)))
init.kaiming_uniform_(self.M.data)
self.attention_softmax = torch.nn.Softmax(dim=-1)
def forward(self, input_embeddings):
mean_embedding = torch.mean(input_embeddings, (1,)).unsqueeze(2)
product_1 = torch.matmul(self.M, mean_embedding)
product_2 = torch.matmul(input_embeddings, product_1).squeeze(2)
results = self.attention_softmax(product_2)
return results
def extra_repr(self):
return 'wv_dim={}, maxlen={}'.format(self.wv_dim, self.maxlen)
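# Added sketch (our helper, not from the repo): the forward above scores each
# token by its dot product with the M-transformed mean embedding, i.e. a
# softmax over E @ M @ mean_t(E). The einsum below spells that out in one
# expression for input E of shape (B, T, D).
def _self_attention_reference(E, M):
    m = E.mean(dim=1)
    scores = torch.einsum('btd,dk,bk->bt', E, M, m)
    return torch.softmax(scores, dim=-1)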
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'wv_dim': 4, 'maxlen': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import init
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
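# Added note (ours, not Inductor output): the two kernels above are the usual
# numerically stable softmax split into two passes over each length-4 row:
# _softmax_1 subtracts the row max before exponentiating (so exp cannot
# overflow) and _softmax_2 divides by the row sum of those exponentials. With
# rows of only four elements the row reduction is simply unrolled into four
# evict_last loads.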
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(primals_1, reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 4), 0), out=buf2)
buf3 = buf1
del buf1
triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf3
    return buf4, buf0, buf4, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0)
class SelfAttentionNew(torch.nn.Module):
def __init__(self, wv_dim: 'int', maxlen: 'int'):
super(SelfAttentionNew, self).__init__()
self.wv_dim = wv_dim
self.maxlen = maxlen
self.M = Parameter(torch.empty(size=(wv_dim, wv_dim)))
init.kaiming_uniform_(self.M.data)
self.attention_softmax = torch.nn.Softmax(dim=-1)
def extra_repr(self):
return 'wv_dim={}, maxlen={}'.format(self.wv_dim, self.maxlen)
def forward(self, input_0):
primals_2 = self.M
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| xlwreally/Graduation-project-ABAE | SelfAttention | false | 4,587 | [
"MIT"
] | 0 | 7c389acfff0fd207e4588b4333521e2dfbf12ec7 | https://github.com/xlwreally/Graduation-project-ABAE/tree/7c389acfff0fd207e4588b4333521e2dfbf12ec7 | import torch
from torch.nn import init
from torch.nn.parameter import Parameter
class Model(torch.nn.Module):
def __init__(self, wv_dim: 'int', maxlen: 'int'):
super().__init__()
self.wv_dim = wv_dim
self.maxlen = maxlen
self.M = Parameter(torch.empty(size=(wv_dim, wv_dim)))
init.kaiming_uniform_(self.M.data)
self.attention_softmax = torch.nn.Softmax(dim=-1)
def forward(self, input_embeddings):
mean_embedding = torch.mean(input_embeddings, (1,)).unsqueeze(2)
product_1 = torch.matmul(self.M, mean_embedding)
product_2 = torch.matmul(input_embeddings, product_1).squeeze(2)
results = self.attention_softmax(product_2)
return results
def extra_repr(self):
return 'wv_dim={}, maxlen={}'.format(self.wv_dim, self.maxlen)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SoftDetectionModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/72/c72xsm4j2ydy65cd6s2dqcf73hjvkqbi2lraxbqgd3vljynafcgn.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => max_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {})
triton_per_fused_max_0 = async_compile.triton('triton_per_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
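# Added note (ours, not Inductor output): this first kernel computes, for each
# of the 4 batch elements, the maximum of relu(scores) over all 64 flattened
# channel*spatial positions. That per-batch max is the temperature used by
# the next kernel's exp(relu(x) / max) normalization, a soft-detection trick
# used in keypoint heads such as D2-Net-style modules.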
# kernel path: runs/run_shard_7/inductor_cache/yh/cyhmw3wd3ttwv3gwn5pdrawto4dhpm5r565oikeygp4rncuwxbr2.py
# Topologically Sorted Source Nodes: [batch, truediv, exp, pad], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd]
# Source node to ATen node mapping:
# batch => relu
# exp => exp
# pad => constant_pad_nd
# truediv => div
# Graph fragment:
# %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%exp, [1, 1, 1, 1], 1.0), kwargs = {})
triton_poi_fused_constant_pad_nd_div_exp_relu_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_div_exp_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_div_exp_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_div_exp_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 6) % 6
x0 = xindex % 6
x4 = (xindex // 36)
x3 = (xindex // 144)
x6 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x4)), tmp10 & xmask, other=0.0)
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr1 + (x3), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 / tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tl.full(tmp16.shape, 1.0, tmp16.dtype)
tmp18 = tl.where(tmp10, tmp16, tmp17)
tl.store(out_ptr0 + (x6), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/av/cavduzqn2kcecsivfbdoiquab64kxakphvxa2ir5glbkv2uxr56x.py
# Topologically Sorted Source Nodes: [batch, truediv, exp, pad, avg_pool2d], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd, aten.avg_pool2d]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# batch => relu
# exp => exp
# pad => constant_pad_nd
# truediv => div
# Graph fragment:
# %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%exp, [1, 1, 1, 1], 1.0), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%constant_pad_nd, [3, 3], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2 = async_compile.triton('triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (6*x1) + (36*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (6*x1) + (36*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + (6*x1) + (36*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (6 + x0 + (6*x1) + (36*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (7 + x0 + (6*x1) + (36*x2)), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + (6*x1) + (36*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + (6*x1) + (36*x2)), xmask)
tmp13 = tl.load(in_ptr0 + (13 + x0 + (6*x1) + (36*x2)), xmask)
tmp15 = tl.load(in_ptr0 + (14 + x0 + (6*x1) + (36*x2)), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tl.store(out_ptr0 + (x3), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6v/c6v2dq4j27mrmzkudmd2l2ookfsao2o6sqtsagdumpcc33lwpchn.py
# Topologically Sorted Source Nodes: [batch, max_2, truediv, exp, sum_exp, local_max_score, depth_wise_max_score, all_scores, max_3, sum_1, score_1], Original ATen: [aten.relu, aten.max, aten.div, aten.exp, aten.mul, aten.sum]
# Source node to ATen node mapping:
# all_scores => mul_1
# batch => relu
# depth_wise_max_score => div_2
# exp => exp
# local_max_score => div_1
# max_2 => max_2
# max_3 => max_3
# score_1 => div_3
# sum_1 => sum_1
# sum_exp => mul
# truediv => div
# Graph fragment:
# %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%relu, 1, True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 9), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %mul), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %getitem_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div_2), kwargs = {})
# %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul_1, 1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_2, [1]), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%getitem_4, %view_3), kwargs = {})
triton_per_fused_div_exp_max_mul_relu_sum_3 = async_compile.triton('triton_per_fused_div_exp_max_mul_relu_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_exp_max_mul_relu_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_exp_max_mul_relu_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (r1 + (64*x0)), xmask, other=0.0)
tmp10 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), xmask, other=0.0)
tmp13 = tl.load(in_ptr0 + (32 + r1 + (64*x0)), xmask, other=0.0)
tmp16 = tl.load(in_ptr0 + (48 + r1 + (64*x0)), xmask, other=0.0)
tmp23 = tl.load(in_ptr2 + (16 + r1 + (64*x0)), xmask, other=0.0)
tmp31 = tl.load(in_ptr2 + (32 + r1 + (64*x0)), xmask, other=0.0)
tmp39 = tl.load(in_ptr2 + (48 + r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 / tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = 9.0
tmp8 = tmp6 * tmp7
tmp9 = tmp5 / tmp8
tmp11 = triton_helpers.maximum(tmp1, tmp10)
tmp12 = triton_helpers.maximum(tmp2, tmp11)
tmp14 = triton_helpers.maximum(tmp1, tmp13)
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = triton_helpers.maximum(tmp1, tmp16)
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp2 / tmp18
tmp20 = tmp9 * tmp19
tmp21 = tmp11 / tmp3
tmp22 = tl_math.exp(tmp21)
tmp24 = tmp23 * tmp7
tmp25 = tmp22 / tmp24
tmp26 = tmp11 / tmp18
tmp27 = tmp25 * tmp26
tmp28 = triton_helpers.maximum(tmp20, tmp27)
tmp29 = tmp14 / tmp3
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp31 * tmp7
tmp33 = tmp30 / tmp32
tmp34 = tmp14 / tmp18
tmp35 = tmp33 * tmp34
tmp36 = triton_helpers.maximum(tmp28, tmp35)
tmp37 = tmp17 / tmp3
tmp38 = tl_math.exp(tmp37)
tmp40 = tmp39 * tmp7
tmp41 = tmp38 / tmp40
tmp42 = tmp17 / tmp18
tmp43 = tmp41 * tmp42
tmp44 = triton_helpers.maximum(tmp36, tmp43)
tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
tmp47 = tl.where(xmask, tmp45, 0)
tmp48 = tl.sum(tmp47, 1)[:, None]
tmp49 = tmp44 / tmp48
tl.store(out_ptr2 + (r1 + (16*x0)), tmp49, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_per_fused_max_0.run(arg0_1, buf0, 4, 64, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch, truediv, exp, pad], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_div_exp_relu_1.run(arg0_1, buf0, buf2, 576, grid=grid(576), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch, truediv, exp, pad, avg_pool2d], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd, aten.avg_pool2d]
triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch, max_2, truediv, exp, sum_exp, local_max_score, depth_wise_max_score, all_scores, max_3, sum_1, score_1], Original ATen: [aten.relu, aten.max, aten.div, aten.exp, aten.mul, aten.sum]
triton_per_fused_div_exp_max_mul_relu_sum_3.run(arg0_1, buf0, buf3, buf6, 4, 16, grid=grid(4), stream=stream0)
del arg0_1
del buf0
del buf3
return (buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils
import torch.nn as nn
import torch.nn.functional as F
class SoftDetectionModule(nn.Module):
def __init__(self, soft_local_max_size=3):
super(SoftDetectionModule, self).__init__()
self.soft_local_max_size = soft_local_max_size
self.pad = self.soft_local_max_size // 2
self.scale = 1000
def forward(self, batch):
b = batch.size(0)
batch = F.relu(batch)
max_per_sample = torch.max(batch.view(b, -1), dim=1)[0]
exp = torch.exp(batch / max_per_sample.view(b, 1, 1, 1))
        sum_exp = self.soft_local_max_size ** 2 * F.avg_pool2d(
            F.pad(exp, [self.pad] * 4, mode='constant', value=1.0),
            self.soft_local_max_size, stride=1)
local_max_score = exp / sum_exp
depth_wise_max = torch.max(batch, dim=1, keepdim=True)[0]
depth_wise_max_score = batch / depth_wise_max
all_scores = local_max_score * depth_wise_max_score
score = torch.max(all_scores, dim=1)[0]
score = score / torch.sum(score.view(b, -1), dim=1).view(b, 1, 1)
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
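# Hedged sketch (not part of the original repo): the final division by the
# per-sample sum makes each score map a probability distribution over the
# spatial grid, which a quick eager-mode check can confirm.
def _check_score_normalization():
    module = SoftDetectionModule()
    score = module(torch.rand(4, 4, 4, 4))
    per_sample = score.view(score.size(0), -1).sum(dim=1)
    assert torch.allclose(per_sample, torch.ones_like(per_sample), atol=1e-05)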
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
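# Persistent reduction, one program per batch element (xnumel = 4): applies
# ReLU to all 64 values of a sample and reduces them to max_per_sample in a
# single RBLOCK = 64 tile; rows outside xmask are filled with -inf so the
# max reduction stays well-defined.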
@triton.jit
def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
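# Pointwise fusion of relu, division by max_per_sample, exp, and the 1-pixel
# constant pad: writes the padded (4, 4, 6, 6) map exp(relu(x) / max)
# directly, storing the pad value 1.0 outside the original 4x4 window.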
@triton.jit
def triton_poi_fused_constant_pad_nd_div_exp_relu_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 6
x0 = xindex % 6
x4 = xindex // 36
x3 = xindex // 144
x6 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x4), tmp10 & xmask,
other=0.0)
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr1 + x3, tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 / tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tl.full(tmp16.shape, 1.0, tmp16.dtype)
tmp18 = tl.where(tmp10, tmp16, tmp17)
tl.store(out_ptr0 + x6, tmp18, xmask)
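# 3x3, stride-1 average pool over the padded 6x6 map: loads the nine
# neighbours explicitly and scales their sum by 1/9; the next kernel
# multiplies this back by 9 to recover sum_exp for the soft local max score.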
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tl.store(out_ptr0 + x3, tmp18, xmask)
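# One program per sample: recomputes relu and exp on the fly, forms the local
# max score exp / (9 * avg_pool), the depth-wise max score over the 4
# channels, reduces their product with a channel-wise max, and normalizes by
# the per-sample spatial sum to emit the final (4, 4, 4) score map.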
@triton.jit
def triton_per_fused_div_exp_max_mul_relu_sum_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0)
tmp10 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp13 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp16 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp23 = tl.load(in_ptr2 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp31 = tl.load(in_ptr2 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp39 = tl.load(in_ptr2 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 / tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = 9.0
tmp8 = tmp6 * tmp7
tmp9 = tmp5 / tmp8
tmp11 = triton_helpers.maximum(tmp1, tmp10)
tmp12 = triton_helpers.maximum(tmp2, tmp11)
tmp14 = triton_helpers.maximum(tmp1, tmp13)
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = triton_helpers.maximum(tmp1, tmp16)
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp2 / tmp18
tmp20 = tmp9 * tmp19
tmp21 = tmp11 / tmp3
tmp22 = tl_math.exp(tmp21)
tmp24 = tmp23 * tmp7
tmp25 = tmp22 / tmp24
tmp26 = tmp11 / tmp18
tmp27 = tmp25 * tmp26
tmp28 = triton_helpers.maximum(tmp20, tmp27)
tmp29 = tmp14 / tmp3
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp31 * tmp7
tmp33 = tmp30 / tmp32
tmp34 = tmp14 / tmp18
tmp35 = tmp33 * tmp34
tmp36 = triton_helpers.maximum(tmp28, tmp35)
tmp37 = tmp17 / tmp3
tmp38 = tl_math.exp(tmp37)
tmp40 = tmp39 * tmp7
tmp41 = tmp38 / tmp40
tmp42 = tmp17 / tmp18
tmp43 = tmp41 * tmp42
tmp44 = triton_helpers.maximum(tmp36, tmp43)
tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
tmp47 = tl.where(xmask, tmp45, 0)
tmp48 = tl.sum(tmp47, 1)[:, None]
tmp49 = tmp44 / tmp48
tl.store(out_ptr2 + (r1 + 16 * x0), tmp49, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_max_0[grid(4)](arg0_1, buf0, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_constant_pad_nd_div_exp_relu_1[grid(576)](arg0_1,
buf0, buf2, 576, XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2[grid(256)](
buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused_div_exp_max_mul_relu_sum_3[grid(4)](arg0_1, buf0,
buf3, buf6, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
del buf3
return buf6,
class SoftDetectionModuleNew(nn.Module):
def __init__(self, soft_local_max_size=3):
super(SoftDetectionModuleNew, self).__init__()
self.soft_local_max_size = soft_local_max_size
self.pad = self.soft_local_max_size // 2
self.scale = 1000
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
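# Hedged usage sketch (assumes a CUDA device, mirroring the benchmark above):
#
#     module = SoftDetectionModuleNew()
#     score = module(torch.rand(4, 4, 4, 4, device='cuda'))  # -> (4, 4, 4)
#
# call() clears its argument list and drops its reference to the input
# buffer, so a fresh tensor must be passed on every invocation.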
| xmlyqing00/d2-net | SoftDetectionModule | false | 4,588 | [
"BSD-3-Clause-Clear"
] | 0 | 3454a2862088682a6bdb2532ff049fd6cd82729c | https://github.com/xmlyqing00/d2-net/tree/3454a2862088682a6bdb2532ff049fd6cd82729c | import torch
import torch.utils
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, soft_local_max_size=3):
super().__init__()
self.soft_local_max_size = soft_local_max_size
self.pad = self.soft_local_max_size // 2
self.scale = 1000
def forward(self, batch):
b = batch.size(0)
batch = F.relu(batch)
max_per_sample = torch.max(batch.view(b, -1), dim=1)[0]
exp = torch.exp(batch / max_per_sample.view(b, 1, 1, 1))
        sum_exp = self.soft_local_max_size ** 2 * F.avg_pool2d(
            F.pad(exp, [self.pad] * 4, mode='constant', value=1.0),
            self.soft_local_max_size, stride=1)
local_max_score = exp / sum_exp
depth_wise_max = torch.max(batch, dim=1, keepdim=True)[0]
depth_wise_max_score = batch / depth_wise_max
all_scores = local_max_score * depth_wise_max_score
score = torch.max(all_scores, dim=1)[0]
score = score / torch.sum(score.view(b, -1), dim=1).view(b, 1, 1)
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ForwardNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/z4/cz4cbw4gpj7elswxd76xfi2rczcz7clesffiyvgdrdahgny5dakh.py
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum]
# Source node to ATen node mapping:
# e_x => exp
# e_x_1 => mul_1
# max_1 => max_1
# sub => sub
# sum_1 => sum_1
# x => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, %primals_6), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 1, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %primals_6), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1], True), kwargs = {})
triton_poi_fused_exp_max_mul_sub_sum_1 = async_compile.triton('triton_poi_fused_exp_max_mul_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_max_mul_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_max_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x2 = (xindex // 16)
x4 = xindex % 16
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4 + (64*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x4 + (64*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x4 + (64*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x4 + (64*x2)), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 * tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 * tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 * tmp1
tmp18 = tmp5 - tmp14
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 * tmp4
tmp21 = tmp17 + tmp20
tmp22 = tmp9 - tmp14
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 * tmp8
tmp25 = tmp21 + tmp24
tmp26 = tmp13 - tmp14
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp27 * tmp12
tmp29 = tmp25 + tmp28
tl.store(out_ptr0 + (x5), tmp14, xmask)
tl.store(out_ptr1 + (x5), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6m/c6maj6noglgg3okzmxiv4u3srhu54pov7obv57fn6vlkqal326zf.py
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, add, softmax], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# e_x => exp
# e_x_1 => mul_1
# max_1 => max_1
# softmax => div
# sub => sub
# x => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_2, %primals_6), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 1, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %getitem), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %primals_6), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add), kwargs = {})
triton_poi_fused_add_div_exp_max_mul_sub_2 = async_compile.triton('triton_poi_fused_add_div_exp_max_mul_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_max_mul_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_exp_max_mul_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
x5 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x1 + (4*x3) + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask)
tmp3 = tl.load(in_ptr2 + (x5 + (16*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x5 + (16*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp1
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = tmp6 / tmp9
tl.store(out_ptr0 + (x5 + (16*x3) + (64*x2)), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [o], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, sum_1], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.sum]
triton_poi_fused_exp_max_mul_sub_sum_1.run(buf3, primals_6, buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 4, 4), (16, 64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, max_1, sub, e_x, e_x_1, add, softmax], Original ATen: [aten.mul, aten.max, aten.sub, aten.exp, aten.add, aten.div]
triton_poi_fused_add_div_exp_max_mul_sub_2.run(buf3, primals_6, buf4, buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf4
del buf5
return (buf6, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn.functional as F
def masked_softmax(x, m=None, dim=-1):
"""
    Softmax with an optional multiplicative mask.
    :param x: input scores
    :param m: optional mask tensor (1 keeps a position, 0 zeroes it out)
    :param dim: dimension along which to normalize
    :return: masked softmax of x along dim
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class ForwardNet(torch.nn.Module):
"""
    One hidden layer followed by a masked-softmax output layer.
    Args:
        - input_size: size of each input feature vector
        - hidden_size: size of the hidden layer
        - dropout_p: dropout probability applied to the hidden activations
Inputs:
- x: (seq_len, batch, input_size)
- x_mask: (batch, seq_len)
Outputs:
- beta: (batch, seq_len)
"""
def __init__(self, input_size, hidden_size, dropout_p):
super(ForwardNet, self).__init__()
self.linear_h = torch.nn.Linear(input_size, hidden_size)
self.linear_o = torch.nn.Linear(hidden_size, 1)
self.dropout = torch.nn.Dropout(p=dropout_p)
def forward(self, x, x_mask):
h = F.relu(self.linear_h(x))
h = self.dropout(h)
o = self.linear_o(h)
o = o.squeeze(2).transpose(0, 1)
beta = masked_softmax(o, x_mask, dim=1)
return beta
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'dropout_p': 0.5}]
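# Hedged sketch (not from the original repo): masked positions receive exactly
# zero probability because e_x is multiplied by the mask again after the exp,
# and the remaining weights sum to ~1 up to the 1e-06 stabilizer.
def _check_masked_softmax():
    x = torch.randn(2, 5)
    m = torch.tensor([[1.0, 1.0, 0.0, 1.0, 0.0],
                      [1.0, 0.0, 0.0, 1.0, 1.0]])
    p = masked_softmax(x, m, dim=1)
    assert torch.all(p[m == 0] == 0)
    assert torch.allclose(p.sum(dim=1), torch.ones(2), atol=0.0001)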
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
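# Fused bias add + ReLU that also records the (activation <= 0) mask in a
# bool buffer for the backward pass (hence "threshold_backward" in the name).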
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
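# First half of the numerically stable masked softmax: for each output
# position it forms x * m over the four softmax entries, takes their max, and
# accumulates sum(exp(x * m - max) * m), storing both per-position statistics.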
@triton.jit
def triton_poi_fused_exp_max_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x2 = xindex // 16
x4 = xindex % 16
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4 + 64 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x3), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x4 + 64 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x4 + 64 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (48 + x4 + 64 * x2), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 * tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 * tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp16 * tmp1
tmp18 = tmp5 - tmp14
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 * tmp4
tmp21 = tmp17 + tmp20
tmp22 = tmp9 - tmp14
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 * tmp8
tmp25 = tmp21 + tmp24
tmp26 = tmp13 - tmp14
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp27 * tmp12
tmp29 = tmp25 + tmp28
tl.store(out_ptr0 + x5, tmp14, xmask)
tl.store(out_ptr1 + x5, tmp29, xmask)
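# Second half of the masked softmax: recomputes exp(x * m - max) * m per
# entry and divides by the accumulated sum plus the 1e-06 stabilizer.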
@triton.jit
def triton_poi_fused_add_div_exp_max_mul_sub_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
x5 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp3 = tl.load(in_ptr2 + (x5 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr3 + (x5 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp1
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = tmp6 / tmp9
tl.store(out_ptr0 + (x5 + 16 * x3 + 64 * x2), tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
triton_poi_fused_exp_max_mul_sub_sum_1[grid(64)](buf3, primals_6,
buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 4), (16, 64, 4, 1), torch.float32)
triton_poi_fused_add_div_exp_max_mul_sub_2[grid(256)](buf3,
primals_6, buf4, buf5, buf6, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf4
del buf5
return buf6, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, buf7
def masked_softmax(x, m=None, dim=-1):
"""
    Softmax with an optional multiplicative mask.
    :param x: input scores
    :param m: optional mask tensor (1 keeps a position, 0 zeroes it out)
    :param dim: dimension along which to normalize
    :return: masked softmax of x along dim
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class ForwardNetNew(torch.nn.Module):
"""
    One hidden layer followed by a masked-softmax output layer.
    Args:
        - input_size: size of each input feature vector
        - hidden_size: size of the hidden layer
        - dropout_p: dropout probability applied to the hidden activations
Inputs:
- x: (seq_len, batch, input_size)
- x_mask: (batch, seq_len)
Outputs:
- beta: (batch, seq_len)
"""
def __init__(self, input_size, hidden_size, dropout_p):
super(ForwardNetNew, self).__init__()
self.linear_h = torch.nn.Linear(input_size, hidden_size)
self.linear_o = torch.nn.Linear(hidden_size, 1)
self.dropout = torch.nn.Dropout(p=dropout_p)
def forward(self, input_0, input_1):
primals_1 = self.linear_h.weight
primals_2 = self.linear_h.bias
primals_4 = self.linear_o.weight
primals_5 = self.linear_o.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
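# Hedged usage sketch (assumes a CUDA device): the wrapper takes the input
# and its mask as separate tensors, matching masked_softmax(o, x_mask, dim=1):
#
#     net = ForwardNetNew(4, 4, 0.5)
#     beta = net(torch.rand(4, 4, 4, 4, device='cuda'),
#                torch.rand(4, 4, 4, 4, device='cuda'))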
| xdong73S/Match_LSTM_v2.0 | ForwardNet | false | 4,589 | [
"MIT"
] | 0 | dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | https://github.com/xdong73S/Match_LSTM_v2.0/tree/dfb8cfbc2a5dafc6655eecf151a7dbcf808cd729 | import torch
import torch.utils.data
import torch.nn.functional as F
def masked_softmax(x, m=None, dim=-1):
"""
    Softmax with an optional multiplicative mask.
    :param x: input scores
    :param m: optional mask tensor (1 keeps a position, 0 zeroes it out)
    :param dim: dimension along which to normalize
    :return: masked softmax of x along dim
"""
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=dim, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=dim, keepdim=True) + 1e-06)
return softmax
class Model(torch.nn.Module):
"""
    One hidden layer followed by a masked-softmax output layer.
    Args:
        - input_size: size of each input feature vector
        - hidden_size: size of the hidden layer
        - dropout_p: dropout probability applied to the hidden activations
Inputs:
- x: (seq_len, batch, input_size)
- x_mask: (batch, seq_len)
Outputs:
- beta: (batch, seq_len)
"""
def __init__(self, input_size, hidden_size, dropout_p):
super().__init__()
self.linear_h = torch.nn.Linear(input_size, hidden_size)
self.linear_o = torch.nn.Linear(hidden_size, 1)
self.dropout = torch.nn.Dropout(p=dropout_p)
def forward(self, x, x_mask):
h = F.relu(self.linear_h(x))
h = self.dropout(h)
o = self.linear_o(h)
o = o.squeeze(2).transpose(0, 1)
beta = masked_softmax(o, x_mask, dim=1)
return beta
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 0.5]
|
Str2MSA | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7r/c7ro6rxiigiepcbzbff4nww6gltt467oipk63naj2aqqrmk5eo55.py
# Topologically Sorted Source Nodes: [mask_s, lt], Original ATen: [aten.stack, aten.lt]
# Source node to ATen node mapping:
# lt => lt
# mask_s => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sub_1, %sub_3, %sub_5, %sub_7], 1), kwargs = {})
# %lt : [num_users=2] = call_function[target=torch.ops.aten.lt.Scalar](args = (%view, 0.5), kwargs = {})
triton_poi_fused_lt_stack_0 = async_compile.triton('triton_poi_fused_lt_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_lt_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_lt_stack_0(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 16
x0 = xindex % 4
x2 = (xindex // 64)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp4 & xmask, other=0.0)
tmp6 = 8.0
tmp7 = tmp5 - tmp6
tmp8 = tl.sigmoid(tmp7)
tmp9 = 1.0
tmp10 = tmp9 - tmp8
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tmp0 >= tmp3
tmp14 = tl.full([1], 8, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tmp13 & tmp15
tmp17 = tl.load(in_ptr0 + (x0 + (4*((-4) + x1)) + (16*x2)), tmp16 & xmask, other=0.0)
tmp18 = 12.0
tmp19 = tmp17 - tmp18
tmp20 = tl.sigmoid(tmp19)
tmp21 = tmp9 - tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp16, tmp21, tmp22)
tmp24 = tmp0 >= tmp14
tmp25 = tl.full([1], 12, tl.int64)
tmp26 = tmp0 < tmp25
tmp27 = tmp24 & tmp26
tmp28 = tl.load(in_ptr0 + (x0 + (4*((-8) + x1)) + (16*x2)), tmp27 & xmask, other=0.0)
tmp29 = 16.0
tmp30 = tmp28 - tmp29
tmp31 = tl.sigmoid(tmp30)
tmp32 = tmp9 - tmp31
tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
tmp34 = tl.where(tmp27, tmp32, tmp33)
tmp35 = tmp0 >= tmp25
tmp36 = tl.full([1], 16, tl.int64)
tmp37 = tmp0 < tmp36
tmp38 = tl.load(in_ptr0 + (x0 + (4*((-12) + x1)) + (16*x2)), tmp35 & xmask, other=0.0)
tmp39 = 20.0
tmp40 = tmp38 - tmp39
tmp41 = tl.sigmoid(tmp40)
tmp42 = tmp9 - tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp35, tmp42, tmp43)
tmp45 = tl.where(tmp27, tmp34, tmp44)
tmp46 = tl.where(tmp16, tmp23, tmp45)
tmp47 = tl.where(tmp4, tmp12, tmp46)
tmp48 = 0.5
tmp49 = tmp47 < tmp48
tl.store(out_ptr1 + (x3), tmp49, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vk/cvkv6sboiitypmnyxfoq5lxlbcpaztaxqfkyjuv75l2bihdebyda.py
# Topologically Sorted Source Nodes: [mean, var, add, std, sub_8, x, x_1, x_2], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# std => sqrt
# sub_8 => sub_8
# var => var
# x => mul
# x_1 => div
# x_2 => add_1
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_2, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_2, [-1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sub_8), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %sqrt), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_4), kwargs = {})
triton_per_fused_add_div_mean_mul_sqrt_sub_var_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_sqrt_sub_var_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_sqrt_sub_var_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 32
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (32*x0)), xmask, other=0.0)
tmp25 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (r1), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 32, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 32.0
tmp20 = tmp4 / tmp19
tmp21 = tmp18 / tmp19
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp26 = tmp0 - tmp20
tmp27 = tmp25 * tmp26
tmp28 = tmp27 / tmp24
tmp30 = tmp28 + tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp24, xmask)
tl.store(out_ptr0 + (r1 + (32*x0)), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pl/cplaegylqzwimiascwxpzxkzv6wll2horn756obggce4zcrfzgrt.py
# Topologically Sorted Source Nodes: [mean_2, var_1, add_1, std_1, sub_9, x_3, x_4, x_5], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add_1 => add_2
# mean_2 => mean_1
# std_1 => sqrt_1
# sub_9 => sub_9
# var_1 => var_1
# x_3 => mul_1
# x_4 => div_1
# x_5 => add_3
# Graph fragment:
# %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_5, [-1], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_5, [-1]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var_1, 1e-05), kwargs = {})
# %sqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_5, %mean_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %sub_9), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %sqrt_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, %primals_7), kwargs = {})
triton_per_fused_add_div_mean_mul_sqrt_sub_var_2 = async_compile.triton('triton_per_fused_add_div_mean_mul_sqrt_sub_var_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_sqrt_sub_var_2', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp25 = tl.load(in_ptr1 + (r1), None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (r1), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = tmp18 / tmp19
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp26 = tmp0 - tmp20
tmp27 = tmp25 * tmp26
tmp28 = tmp27 / tmp24
tmp30 = tmp28 + tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp24, xmask)
tl.store(out_ptr0 + (r1 + (64*x0)), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5p/c5pibcu5puwpuh4keg4u5q6a3og6tddx2p4gwu2eiz33bu7uevc4.py
# Topologically Sorted Source Nodes: [q_1, attention], Original ATen: [aten.mul, aten.clone]
# Source node to ATen node mapping:
# attention => clone
# q_1 => mul_2
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_1, 0.17677669529663687), kwargs = {})
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_mul_3 = async_compile.triton('triton_poi_fused_clone_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128) % 4
x2 = (xindex // 512) % 4
x3 = (xindex // 2048)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x2) + (512*x1) + (2048*x3)), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.17677669529663687
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x4), tmp4, None)
''', device_str='cuda')
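# Note (assumption made explicit): the constant 0.17677669529663687 baked into
# the kernel above is 1/sqrt(32) = 1/sqrt(d_k), i.e. the attention scaling
# applied to q; the kernel fuses the query bias add with that scaling.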
# kernel path: runs/run_shard_7/inductor_cache/eb/cebhq2wzqzzoga3nzeuthh267dho2v4y5qhop5lvv2hdwvyz2cju.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attention => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y2 = (yindex // 512)
y4 = yindex % 512
y0 = yindex % 128
y5 = yindex
tmp0 = tl.load(in_ptr0 + (y4 + (512*x3) + (2048*y2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x3 + (4*y5)), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3a/c3alfyfsoya27hzppi42pior7fgqmuicqa4rly5son4scj57qalq.py
# Topologically Sorted Source Nodes: [attention_1, attention_2], Original ATen: [aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# attention_1 => full_default, where
# attention_2 => amax, exp, sub_10, sum_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -3.4028234663852886e+38), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%lt, %full_default, %view_15), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_10,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_masked_fill_5 = async_compile.triton('triton_poi_fused__softmax_masked_fill_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_masked_fill_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp5 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp13 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = -3.4028234663852886e+38
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp6 = tl.where(tmp4, tmp2, tmp5)
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp10 = tl.where(tmp8, tmp2, tmp9)
tmp11 = triton_helpers.maximum(tmp7, tmp10)
tmp14 = tl.where(tmp12, tmp2, tmp13)
tmp15 = triton_helpers.maximum(tmp11, tmp14)
tmp16 = tmp3 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp6 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp10 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + (x0), tmp15, xmask)
tl.store(out_ptr1 + (x0), tmp26, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/me/cme7rwkpnkxnmmvnegbcsfbjinkqvdoz6iwka57dxuw6xlx2wczs.py
# Topologically Sorted Source Nodes: [attention_1, attention_2], Original ATen: [aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# attention_1 => full_default, where
# attention_2 => amax, div_2, exp, sub_10
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -3.4028234663852886e+38), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%lt, %full_default, %view_15), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_10,), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_masked_fill_6 = async_compile.triton('triton_poi_fused__softmax_masked_fill_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_masked_fill_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_masked_fill_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = -3.4028234663852886e+38
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
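# Eager-mode sketch (assumption): kernels _5 and _6 above together implement a
# numerically stable masked softmax. _5 computes the row-wise max and exp-sum
# of the masked scores; _6 then normalizes each element in place. The pair is
# equivalent to:
def _masked_softmax_reference(scores, mask_lt):
    filled = scores.masked_fill(mask_lt, torch.finfo(scores.dtype).min)
    return torch.softmax(filled, dim=-1)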
# kernel path: runs/run_shard_7/inductor_cache/ee/ceevecisn47duwls63zer6rib567awr4dcrak3hmiibwxfbwmytd.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_10,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16) % 4
x2 = (xindex // 64) % 4
x3 = (xindex // 256) % 4
x4 = (xindex // 1024)
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x3) + (64*x2) + (256*x1) + (1024*x4)), None)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x3)), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x6), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qk/cqkv6yylcqvjh44xvdqzugpr6swgybxmvuj6md2wytdzfm5foaxn.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_12,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_8 = async_compile.triton('triton_poi_fused_clone_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16) % 4
x2 = (xindex // 64) % 4
x3 = (xindex // 256) % 4
x4 = (xindex // 1024)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x3) + (64*x2) + (256*x1) + (1024*x4)), None)
tl.store(out_ptr0 + (x5), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xj/cxjgml7brgyfptvdviubjhpqbykv4tqbxnuyvupgox6h4qoja4p5.py
# Topologically Sorted Source Nodes: [msa, mean_4, var_2, add_3, std_2, sub_10, x_6, x_7, x_8], Original ATen: [aten.add, aten.mean, aten.var, aten.sqrt, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add_3 => add_5
# mean_4 => mean_2
# msa => add_4
# std_2 => sqrt_2
# sub_10 => sub_11
# var_2 => var_2
# x_6 => mul_3
# x_7 => div_3
# x_8 => add_6
# Graph fragment:
# %add_4 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %view_22), kwargs = {})
# %mean_2 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%add_4, [-1], True), kwargs = {})
# %var_2 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add_4, [-1]), kwargs = {correction: 0, keepdim: True})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var_2, 1e-05), kwargs = {})
# %sqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_5,), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %mean_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_16, %sub_11), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, %sqrt_2), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_3, %primals_17), kwargs = {})
triton_per_fused_add_div_mean_mul_sqrt_sub_var_9 = async_compile.triton('triton_per_fused_add_div_mean_mul_sqrt_sub_var_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_sqrt_sub_var_9', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_9(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp27 = tl.load(in_ptr2 + (r1), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + (r1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp8 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp3 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 64.0
tmp22 = tmp6 / tmp21
tmp23 = tmp20 / tmp21
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp28 = tmp2 - tmp22
tmp29 = tmp27 * tmp28
tmp30 = tmp29 / tmp26
tmp32 = tmp30 + tmp31
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp22, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp26, xmask)
tl.store(out_ptr0 + (r1 + (64*x0)), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3n/c3nobtl75ypqh6uwttv2ohqh6coi5zxkbpfmq6oindp66qdf32fp.py
# Topologically Sorted Source Nodes: [relu_], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu_ => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_25,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_30, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_10 = async_compile.triton('triton_poi_fused_relu_threshold_backward_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_10(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x4), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, None)
tl.store(out_ptr0 + (x4), tmp6, None)
''', device_str='cuda')
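# Eager-mode sketch (assumption): the kernel above fuses the linear bias add
# with an in-place ReLU and additionally emits the boolean mask
# (activation <= 0) that aten.threshold_backward consumes in the backward pass.
def _relu_with_backward_mask(x, bias):
    y = torch.relu(x + bias)
    return y, y <= 0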
# kernel path: runs/run_shard_7/inductor_cache/kt/cktem4mm36mu2rsucwojg5yvdn3bhfqskq7cxhglpujhzg7gdof3.py
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.view]
# Source node to ATen node mapping:
# src => view_31
# Graph fragment:
# %view_31 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_30, [64, 256]), kwargs = {})
triton_poi_fused_view_11 = async_compile.triton('triton_poi_fused_view_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_11(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (1024*((x1 % 4) // 4)) + (4096*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), None)
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ji/cjigcf3tl3mgt3p5in4eqr5xxvipm6a4n2yw6qeoi3b3rk3fmgma.py
# Topologically Sorted Source Nodes: [msa, msa_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# msa => add_4
# msa_1 => add_7
# Graph fragment:
# %add_4 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %view_22), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %view_32), kwargs = {})
triton_poi_fused_add_12 = async_compile.triton('triton_poi_fused_add_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x2), None)
tmp3 = tl.load(in_out_ptr0 + (x2), None)
tmp4 = tl.load(in_ptr2 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
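# Note (assumption): triton_poi_fused_add_12 folds both residual connections
# of Str2MSA.forward into a single pass, computing
#   msa_1 = (msa + attn_out) + (ff_out + ff_bias)
# where in_ptr0/in_ptr1 carry msa and the attention output, in_out_ptr0 holds
# the feed-forward matmul result, and in_ptr2 is its bias.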
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 32), (512, 128, 32, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (32, ), (1, ))
assert_size_stride(primals_5, (4, 4, 4, 64), (1024, 256, 64, 1))
assert_size_stride(primals_6, (64, ), (1, ))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (128, 32), (32, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (128, 32), (32, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (64, 64), (64, 1))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (64, 64), (64, 1))
assert_size_stride(primals_15, (64, ), (1, ))
assert_size_stride(primals_16, (64, ), (1, ))
assert_size_stride(primals_17, (64, ), (1, ))
assert_size_stride(primals_18, (256, 64), (64, 1))
assert_size_stride(primals_19, (256, ), (1, ))
assert_size_stride(primals_20, (64, 256), (256, 1))
assert_size_stride(primals_21, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [dist], Original ATen: [aten._cdist_forward]
buf0 = torch.ops.aten._cdist_forward.default(reinterpret_tensor(primals_1, (4, 4, 4), (64, 16, 1), 4), reinterpret_tensor(primals_1, (4, 4, 4), (64, 16, 1), 4), 2.0, None)
del primals_1
buf1 = buf0
del buf0
buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [mask_s, lt], Original ATen: [aten.stack, aten.lt]
stream0 = get_raw_stream(0)
triton_poi_fused_lt_stack_0.run(buf1, buf23, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf3 # reuse
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
buf15 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, var, add, std, sub_8, x, x_1, x_2], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
triton_per_fused_add_div_mean_mul_sqrt_sub_var_1.run(buf4, buf8, primals_2, primals_3, primals_4, buf15, 64, 32, grid=grid(64), stream=stream0)
del primals_3
del primals_4
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf9 # reuse
buf14 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf12 # reuse
buf18 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_2, var_1, add_1, std_1, sub_9, x_3, x_4, x_5], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
triton_per_fused_add_div_mean_mul_sqrt_sub_var_2.run(buf10, buf14, primals_5, primals_6, primals_7, buf18, 64, 64, grid=grid(64), stream=stream0)
del primals_6
del primals_7
buf16 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf15, (64, 32), (32, 1), 0), reinterpret_tensor(primals_8, (32, 128), (1, 32), 0), out=buf16)
buf17 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf15, (64, 32), (32, 1), 0), reinterpret_tensor(primals_10, (32, 128), (1, 32), 0), out=buf17)
buf19 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf18, (64, 64), (64, 1), 0), reinterpret_tensor(primals_12, (64, 64), (1, 64), 0), out=buf19)
buf20 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32)
# Topologically Sorted Source Nodes: [q_1, attention], Original ATen: [aten.mul, aten.clone]
triton_poi_fused_clone_mul_3.run(buf16, primals_9, buf20, 8192, grid=grid(8192), stream=stream0)
del primals_9
buf21 = reinterpret_tensor(buf16, (4, 4, 128, 4), (2048, 512, 4, 1), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf17, primals_11, buf21, 2048, 4, grid=grid(2048, 4), stream=stream0)
del buf17
del primals_11
buf22 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf20, (16, 4, 128), (512, 128, 1), 0), reinterpret_tensor(buf21, (16, 128, 4), (512, 4, 1), 0), out=buf22)
buf24 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf25 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attention_1, attention_2], Original ATen: [aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_masked_fill_5.run(buf23, buf22, buf24, buf25, 64, grid=grid(64), stream=stream0)
buf26 = reinterpret_tensor(buf22, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf22 # reuse
# Topologically Sorted Source Nodes: [attention_1, attention_2], Original ATen: [aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_masked_fill_6.run(buf26, buf23, buf24, buf25, 256, grid=grid(256), stream=stream0)
buf27 = empty_strided_cuda((4, 4, 4, 4, 16, 1), (1024, 256, 64, 16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(buf19, primals_13, buf27, 4096, grid=grid(4096), stream=stream0)
del primals_13
buf28 = reinterpret_tensor(buf19, (16, 4, 64), (256, 64, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf27, (16, 4, 64), (256, 64, 1), 0), out=buf28)
buf29 = empty_strided_cuda((4, 4, 4, 4, 16), (1024, 256, 64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_8.run(buf28, buf29, 4096, grid=grid(4096), stream=stream0)
buf30 = reinterpret_tensor(buf28, (64, 64), (64, 1), 0); del buf28 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, reinterpret_tensor(buf29, (64, 64), (64, 1), 0), reinterpret_tensor(primals_14, (64, 64), (1, 64), 0), alpha=1, beta=1, out=buf30)
del primals_15
buf31 = buf25; del buf25 # reuse
buf34 = buf24; del buf24 # reuse
buf32 = reinterpret_tensor(buf31, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf31 # reuse
buf36 = reinterpret_tensor(buf34, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf34 # reuse
buf37 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [msa, mean_4, var_2, add_3, std_2, sub_10, x_6, x_7, x_8], Original ATen: [aten.add, aten.mean, aten.var, aten.sqrt, aten.sub, aten.mul, aten.div]
triton_per_fused_add_div_mean_mul_sqrt_sub_var_9.run(buf32, buf36, primals_5, buf30, primals_16, primals_17, buf37, 64, 64, grid=grid(64), stream=stream0)
del primals_17
buf38 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf37, (64, 64), (64, 1), 0), reinterpret_tensor(primals_18, (64, 256), (1, 64), 0), out=buf38)
buf39 = reinterpret_tensor(buf38, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf38 # reuse
buf43 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu_], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_10.run(buf39, primals_19, buf43, 16384, grid=grid(16384), stream=stream0)
del primals_19
buf40 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.view]
triton_poi_fused_view_11.run(buf39, buf40, 16384, grid=grid(16384), stream=stream0)
del buf39
buf41 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf40, reinterpret_tensor(primals_20, (256, 64), (1, 256), 0), out=buf41)
buf42 = reinterpret_tensor(buf41, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf41 # reuse
# Topologically Sorted Source Nodes: [msa, msa_1], Original ATen: [aten.add]
triton_poi_fused_add_12.run(buf42, primals_5, buf30, primals_21, 4096, grid=grid(4096), stream=stream0)
del primals_21
return (buf42, primals_2, primals_5, primals_16, buf4, buf8, buf10, buf14, reinterpret_tensor(buf15, (64, 32), (32, 1), 0), reinterpret_tensor(buf18, (64, 64), (64, 1), 0), buf23, buf26, reinterpret_tensor(buf29, (64, 64), (64, 1), 0), buf30, buf32, buf36, reinterpret_tensor(buf37, (64, 64), (64, 1), 0), buf40, primals_20, buf43, primals_18, primals_14, reinterpret_tensor(buf27, (16, 64, 4), (256, 1, 64), 0), reinterpret_tensor(buf20, (16, 128, 4), (512, 1, 128), 0), reinterpret_tensor(buf21, (16, 4, 128), (512, 1, 4), 0), primals_12, primals_10, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 32), (512, 128, 32, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 64), (1024, 256, 64, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((256, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((64, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
# --- Original PyTorch source for the compiled module above ---
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
        std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
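# Sanity sketch (an added assumption, not part of the original module): this
# LayerNorm is numerically equivalent to F.layer_norm, since both normalize
# with the biased variance. The hypothetical helper below can be called
# manually to confirm:
def _check_layernorm_matches_functional(d_model=64):
    ln = LayerNorm(d_model)
    x = torch.randn(2, 8, d_model)
    ref = F.layer_norm(x, (d_model,), ln.a_2, ln.b_2, ln.eps)
    assert torch.allclose(ln(x), ref, atol=1e-06)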
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, d_ff, p_drop=0.1):
super(FeedForwardLayer, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(p_drop, inplace=True)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, src):
src = self.linear2(self.dropout(F.relu_(self.linear1(src))))
return src
class MaskedDirectMultiheadAttention(nn.Module):
def __init__(self, d_in, d_out, heads, d_k=32, dropout=0.1):
super(MaskedDirectMultiheadAttention, self).__init__()
self.heads = heads
self.scaling = 1 / math.sqrt(d_k)
self.to_query = nn.Linear(d_in, heads * d_k)
self.to_key = nn.Linear(d_in, heads * d_k)
self.to_value = nn.Linear(d_out, d_out)
self.to_out = nn.Linear(d_out, d_out)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, query, key, value, mask):
batch, N, L = value.shape[:3]
        q = self.to_query(query).view(batch, L, self.heads, -1).permute(0, 2, 1, 3)
k = self.to_key(key).view(batch, L, self.heads, -1).permute(0, 2, 1, 3)
        v = self.to_value(value).view(batch, N, L, self.heads, -1).permute(0, 3, 1, 2, 4)
q = q * self.scaling
attention = torch.matmul(q, k.transpose(-2, -1))
attention = attention.masked_fill(mask < 0.5, torch.finfo(q.dtype).min)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
out = torch.einsum('bhij,bhnjk->bhnik', attention, v)
out = out.permute(0, 2, 3, 1, 4).contiguous().view(batch, N, L, -1)
out = self.to_out(out)
return out
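# Shape walkthrough (assumption: query/key of shape (batch, L, d_in) and value
# of shape (batch, N, L, d_out), which is what the views above expect):
#   q, k:      (batch, heads, L, d_k)
#   v:         (batch, heads, N, L, d_out // heads)
#   attention: (batch, heads, L, L), set to the dtype minimum where mask < 0.5,
#              then softmaxed over the last dimension
#   out:       (batch, N, L, d_out) after the einsum, permute and to_out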
class Str2MSA(nn.Module):
    def __init__(self, d_msa=64, d_state=32, inner_dim=32, r_ff=4,
                 distbin=[8.0, 12.0, 16.0, 20.0], p_drop=0.1):
super(Str2MSA, self).__init__()
self.distbin = distbin
n_att_head = len(distbin)
self.norm_state = LayerNorm(d_state)
self.norm1 = LayerNorm(d_msa)
self.attn = MaskedDirectMultiheadAttention(d_state, d_msa,
n_att_head, d_k=inner_dim, dropout=p_drop)
self.dropout1 = nn.Dropout(p_drop, inplace=True)
self.norm2 = LayerNorm(d_msa)
self.ff = FeedForwardLayer(d_msa, d_msa * r_ff, p_drop=p_drop)
self.dropout2 = nn.Dropout(p_drop, inplace=True)
def forward(self, msa, xyz, state):
dist = torch.cdist(xyz[:, :, 1], xyz[:, :, 1])
mask_s = list()
for distbin in self.distbin:
mask_s.append(1.0 - torch.sigmoid(dist - distbin))
mask_s = torch.stack(mask_s, dim=1)
state = self.norm_state(state)
msa2 = self.norm1(msa)
msa2 = self.attn(state, state, msa2, mask_s)
msa = msa + self.dropout1(msa2)
msa2 = self.norm2(msa)
msa2 = self.ff(msa2)
msa = msa + self.dropout2(msa2)
return msa
def get_inputs():
    return [torch.rand([4, 4, 4, 64]), torch.rand([4, 4, 4, 4]),
            torch.rand([4, 4, 4, 32])]
def get_init_inputs():
return [[], {}]
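# Minimal usage sketch (assumption, not part of the original benchmark): run
# the eager Str2MSA module on the random inputs produced by get_inputs().
# eval() disables the dropout layers so the forward pass is deterministic.
def _example_forward():
    model = Str2MSA()
    model.eval()
    msa, xyz, state = get_inputs()
    with torch.no_grad():
        return model(msa, xyz, state)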
# --- Standalone copy of the Triton kernels, stripped of Inductor metadata ---
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_lt_stack_0(in_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 16
x0 = xindex % 4
x2 = xindex // 64
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = 8.0
tmp7 = tmp5 - tmp6
tmp8 = tl.sigmoid(tmp7)
tmp9 = 1.0
tmp10 = tmp9 - tmp8
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tmp0 >= tmp3
tmp14 = tl.full([1], 8, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tmp13 & tmp15
tmp17 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp16 & xmask,
other=0.0)
tmp18 = 12.0
tmp19 = tmp17 - tmp18
tmp20 = tl.sigmoid(tmp19)
tmp21 = tmp9 - tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp16, tmp21, tmp22)
tmp24 = tmp0 >= tmp14
tmp25 = tl.full([1], 12, tl.int64)
tmp26 = tmp0 < tmp25
tmp27 = tmp24 & tmp26
tmp28 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 16 * x2), tmp27 & xmask,
other=0.0)
tmp29 = 16.0
tmp30 = tmp28 - tmp29
tmp31 = tl.sigmoid(tmp30)
tmp32 = tmp9 - tmp31
tmp33 = tl.full(tmp32.shape, 0.0, tmp32.dtype)
tmp34 = tl.where(tmp27, tmp32, tmp33)
tmp35 = tmp0 >= tmp25
tl.full([1], 16, tl.int64)
    tmp38 = tl.load(in_ptr0 + (x0 + 4 * (-12 + x1) + 16 * x2), tmp35 & xmask, other=0.0)
tmp39 = 20.0
tmp40 = tmp38 - tmp39
tmp41 = tl.sigmoid(tmp40)
tmp42 = tmp9 - tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp35, tmp42, tmp43)
tmp45 = tl.where(tmp27, tmp34, tmp44)
tmp46 = tl.where(tmp16, tmp23, tmp45)
tmp47 = tl.where(tmp4, tmp12, tmp46)
tmp48 = 0.5
tmp49 = tmp47 < tmp48
tl.store(out_ptr1 + x3, tmp49, xmask)
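# Note (assumption, derived from the kernel above): stacking the four soft
# masks 1 - sigmoid(dist - b) for b in (8, 12, 16, 20) and testing < 0.5 is
# equivalent to the hard test dist > b, since
#   1 - sigmoid(d - b) < 0.5  <=>  sigmoid(d - b) > 0.5  <=>  d > b.
# So out_ptr1 marks, per attention head, the residue pairs that lie farther
# apart than that head's distance bin.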
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0)
tmp25 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 32, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 32.0
tmp20 = tmp4 / tmp19
tmp21 = tmp18 / tmp19
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp26 = tmp0 - tmp20
tmp27 = tmp25 * tmp26
tmp28 = tmp27 / tmp24
tmp30 = tmp28 + tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp24, xmask)
tl.store(out_ptr0 + (r1 + 32 * x0), tmp30, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_2(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp25 = tl.load(in_ptr1 + r1, None, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = tmp18 / tmp19
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp26 = tmp0 - tmp20
tmp27 = tmp25 * tmp26
tmp28 = tmp27 / tmp24
tmp30 = tmp28 + tmp29
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp24, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp30, xmask)
@triton.jit
def triton_poi_fused_clone_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 4
x2 = xindex // 512 % 4
x3 = xindex // 2048
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x2 + 512 * x1 + 2048 * x3), None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.17677669529663687
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, None)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y2 = yindex // 512
y4 = yindex % 512
y0 = yindex % 128
y5 = yindex
tmp0 = tl.load(in_ptr0 + (y4 + 512 * x3 + 2048 * y2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x3 + 4 * y5), tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_masked_fill_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl.int1)
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp5 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp9 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last').to(tl.int1)
    tmp13 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = -3.4028234663852886e+38
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp6 = tl.where(tmp4, tmp2, tmp5)
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp10 = tl.where(tmp8, tmp2, tmp9)
tmp11 = triton_helpers.maximum(tmp7, tmp10)
tmp14 = tl.where(tmp12, tmp2, tmp13)
tmp15 = triton_helpers.maximum(tmp11, tmp14)
tmp16 = tmp3 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp6 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp10 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + x0, tmp15, xmask)
tl.store(out_ptr1 + x0, tmp26, xmask)
@triton.jit
def triton_poi_fused__softmax_masked_fill_6(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = -3.4028234663852886e+38
tmp3 = tl.where(tmp0, tmp2, tmp1)
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
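# Hedged reference for the two masked-softmax kernels above (a sketch, not part
# of the compiled graph): kernel 5 computes the per-row max and exp-sum after
# masking, kernel 6 normalizes in place.
def _masked_softmax_reference(scores, mask):
    # scores, mask: (..., 4); mask is True where attention must be blocked.
    scores = scores.masked_fill(mask, -3.4028234663852886e+38)
    shifted = scores - scores.max(dim=-1, keepdim=True).values  # kernel 5
    exp = shifted.exp()
    return exp / exp.sum(dim=-1, keepdim=True)  # kernel 6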
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64 % 4
x3 = xindex // 256 % 4
x4 = xindex // 1024
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x3 + 64 * x2 + 256 * x1 + 1024 * x4
), None)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x3), None, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x6, tmp2, None)
@triton.jit
def triton_poi_fused_clone_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64 % 4
x3 = xindex // 256 % 4
x4 = xindex // 1024
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x3 + 64 * x2 + 256 * x1 + 1024 * x4
), None)
tl.store(out_ptr0 + x5, tmp0, None)
@triton.jit
def triton_per_fused_add_div_mean_mul_sqrt_sub_var_9(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp27 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + r1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp8 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp13 = tmp12.to(tl.float32)
tmp14 = tmp11 / tmp13
tmp15 = tmp3 - tmp14
tmp16 = tmp15 * tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.where(xmask, tmp17, 0)
tmp20 = tl.sum(tmp19, 1)[:, None]
tmp21 = 64.0
tmp22 = tmp6 / tmp21
tmp23 = tmp20 / tmp21
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp28 = tmp2 - tmp22
tmp29 = tmp27 * tmp28
tmp30 = tmp29 / tmp26
tmp32 = tmp30 + tmp31
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp22, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp26, xmask)
tl.store(out_ptr0 + (r1 + 64 * x0), tmp32, xmask)
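# Hedged reference for the kernel above (a sketch under the shapes used here):
# it fuses the residual connection with LayerNorm over the last 64 elements,
# persisting mean and sqrt(var + 1e-05) for the backward pass.
def _residual_layernorm_reference(x, y, a_2, b_2, eps=1e-05):
    src = x + y
    mean = src.mean(-1, keepdim=True)
    std = (src.var(-1, unbiased=False, keepdim=True) + eps).sqrt()
    return a_2 * (src - mean) / std + b_2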
@triton.jit
def triton_poi_fused_relu_threshold_backward_10(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, None)
tl.store(out_ptr0 + x4, tmp6, None)
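# The kernel above applies bias + ReLU in place and also emits the boolean mask
# (activation <= 0) consumed by the fused threshold backward pass.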
@triton.jit
def triton_poi_fused_view_11(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 1024 * (x1 % 4 // 4) + 4096 *
((4 * (x1 // 4 % 4) + x1 % 4) // 16)), None)
tl.store(out_ptr0 + x2, tmp0, None)
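# Note: both index correction terms above are identically zero for these shapes
# (x1 % 4 // 4 == 0 and (4 * (x1 // 4 % 4) + x1 % 4) // 16 == 0), so this view
# kernel reduces to a contiguous copy of the (64, 256) activation buffer.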
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x2, None)
tmp3 = tl.load(in_out_ptr0 + x2, None)
tmp4 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 32), (512, 128, 32, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (4, 4, 4, 64), (1024, 256, 64, 1))
assert_size_stride(primals_6, (64,), (1,))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (128, 32), (32, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (128, 32), (32, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (64, 64), (64, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (64, 64), (64, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64,), (1,))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (256, 64), (64, 1))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (64, 256), (256, 1))
assert_size_stride(primals_21, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._cdist_forward.default(reinterpret_tensor(
primals_1, (4, 4, 4), (64, 16, 1), 4), reinterpret_tensor(
primals_1, (4, 4, 4), (64, 16, 1), 4), 2.0, None)
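        # buf0 holds torch.cdist of the CA coordinates: the reinterpret_tensor
        # offset of 4 with strides (64, 16, 1) selects xyz[:, :, 1] in place.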
del primals_1
buf1 = buf0
del buf0
buf23 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_lt_stack_0[grid(256)](buf1, buf23, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf3
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
buf15 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.
float32)
triton_per_fused_add_div_mean_mul_sqrt_sub_var_1[grid(64)](buf4,
buf8, primals_2, primals_3, primals_4, buf15, 64, 32, XBLOCK=8,
num_warps=2, num_stages=1)
del primals_3
del primals_4
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf9
buf14 = reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf12
buf18 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.float32)
triton_per_fused_add_div_mean_mul_sqrt_sub_var_2[grid(64)](buf10,
buf14, primals_5, primals_6, primals_7, buf18, 64, 64, XBLOCK=8,
num_warps=4, num_stages=1)
del primals_6
del primals_7
buf16 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf15, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_8, (32, 128), (1, 32), 0), out=buf16)
buf17 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf15, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_10, (32, 128), (1, 32), 0), out=buf17)
buf19 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_12, (64, 64), (1, 64), 0), out=buf19)
buf20 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.float32)
triton_poi_fused_clone_mul_3[grid(8192)](buf16, primals_9, buf20,
8192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf21 = reinterpret_tensor(buf16, (4, 4, 128, 4), (2048, 512, 4, 1), 0)
del buf16
triton_poi_fused_clone_4[grid(2048, 4)](buf17, primals_11, buf21,
2048, 4, XBLOCK=1, YBLOCK=256, num_warps=4, num_stages=1)
del buf17
del primals_11
buf22 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf20, (16, 4, 128), (512,
128, 1), 0), reinterpret_tensor(buf21, (16, 128, 4), (512, 4, 1
), 0), out=buf22)
buf24 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf25 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_masked_fill_5[grid(64)](buf23, buf22,
buf24, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf26 = reinterpret_tensor(buf22, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf22
triton_poi_fused__softmax_masked_fill_6[grid(256)](buf26, buf23,
buf24, buf25, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf27 = empty_strided_cuda((4, 4, 4, 4, 16, 1), (1024, 256, 64, 16,
1, 1), torch.float32)
triton_poi_fused_clone_7[grid(4096)](buf19, primals_13, buf27, 4096,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf28 = reinterpret_tensor(buf19, (16, 4, 64), (256, 64, 1), 0)
del buf19
extern_kernels.bmm(reinterpret_tensor(buf26, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf27, (16, 4, 64), (256, 64, 1), 0),
out=buf28)
buf29 = empty_strided_cuda((4, 4, 4, 4, 16), (1024, 256, 64, 16, 1),
torch.float32)
triton_poi_fused_clone_8[grid(4096)](buf28, buf29, 4096, XBLOCK=256,
num_warps=4, num_stages=1)
buf30 = reinterpret_tensor(buf28, (64, 64), (64, 1), 0)
del buf28
extern_kernels.addmm(primals_15, reinterpret_tensor(buf29, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_14, (64, 64), (1, 64),
0), alpha=1, beta=1, out=buf30)
del primals_15
buf31 = buf25
del buf25
buf34 = buf24
del buf24
buf32 = reinterpret_tensor(buf31, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf31
buf36 = reinterpret_tensor(buf34, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf34
buf37 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch
.float32)
triton_per_fused_add_div_mean_mul_sqrt_sub_var_9[grid(64)](buf32,
buf36, primals_5, buf30, primals_16, primals_17, buf37, 64, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_17
buf38 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf37, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_18, (64, 256), (1, 64), 0), out=buf38)
buf39 = reinterpret_tensor(buf38, (4, 4, 4, 256), (4096, 1024, 256,
1), 0)
del buf38
buf43 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_10[grid(16384)](buf39,
primals_19, buf43, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf40 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
triton_poi_fused_view_11[grid(16384)](buf39, buf40, 16384, XBLOCK=
256, num_warps=4, num_stages=1)
del buf39
buf41 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(buf40, reinterpret_tensor(primals_20, (256, 64),
(1, 256), 0), out=buf41)
buf42 = reinterpret_tensor(buf41, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf41
triton_poi_fused_add_12[grid(4096)](buf42, primals_5, buf30,
primals_21, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_21
return (buf42, primals_2, primals_5, primals_16, buf4, buf8, buf10,
buf14, reinterpret_tensor(buf15, (64, 32), (32, 1), 0),
reinterpret_tensor(buf18, (64, 64), (64, 1), 0), buf23, buf26,
reinterpret_tensor(buf29, (64, 64), (64, 1), 0), buf30, buf32,
buf36, reinterpret_tensor(buf37, (64, 64), (64, 1), 0), buf40,
primals_20, buf43, primals_18, primals_14, reinterpret_tensor(buf27,
(16, 64, 4), (256, 1, 64), 0), reinterpret_tensor(buf20, (16, 128,
4), (512, 1, 128), 0), reinterpret_tensor(buf21, (16, 4, 128), (512,
1, 4), 0), primals_12, primals_10, primals_8)
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps
)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
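# Hedged sanity-check sketch (not part of the original module; relies on torch
# and F, which this file already imports for the classes below): with
# unbiased=False variance, this LayerNorm matches
# torch.nn.functional.layer_norm up to floating-point error.
def _check_layernorm_matches_functional(d_model=64):
    x = torch.randn(2, 3, d_model)
    ln = LayerNorm(d_model)
    ref = F.layer_norm(x, (d_model,), ln.a_2, ln.b_2, ln.eps)
    assert torch.allclose(ln(x), ref, atol=1e-6)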
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, d_ff, p_drop=0.1):
super(FeedForwardLayer, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(p_drop, inplace=True)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, src):
src = self.linear2(self.dropout(F.relu_(self.linear1(src))))
return src
class MaskedDirectMultiheadAttention(nn.Module):
def __init__(self, d_in, d_out, heads, d_k=32, dropout=0.1):
super(MaskedDirectMultiheadAttention, self).__init__()
self.heads = heads
self.scaling = 1 / math.sqrt(d_k)
self.to_query = nn.Linear(d_in, heads * d_k)
self.to_key = nn.Linear(d_in, heads * d_k)
self.to_value = nn.Linear(d_out, d_out)
self.to_out = nn.Linear(d_out, d_out)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, query, key, value, mask):
batch, N, L = value.shape[:3]
q = self.to_query(query).view(batch, L, self.heads, -1).permute(0,
2, 1, 3)
k = self.to_key(key).view(batch, L, self.heads, -1).permute(0, 2, 1, 3)
v = self.to_value(value).view(batch, N, L, self.heads, -1).permute(
0, 3, 1, 2, 4)
q = q * self.scaling
attention = torch.matmul(q, k.transpose(-2, -1))
attention = attention.masked_fill(mask < 0.5, torch.finfo(q.dtype).min)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
out = torch.einsum('bhij,bhnjk->bhnik', attention, v)
out = out.permute(0, 2, 3, 1, 4).contiguous().view(batch, N, L, -1)
out = self.to_out(out)
return out
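# Shape trace for the attention above (for reference): query/key of shape
# (batch, L, d_in) become q, k of shape (batch, heads, L, d_k); value of shape
# (batch, N, L, d_out) becomes v of shape (batch, heads, N, L, d_out // heads);
# the (L, L) attention map is shared across the N dimension by the einsum.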
class Str2MSANew(nn.Module):
def __init__(self, d_msa=64, d_state=32, inner_dim=32, r_ff=4, distbin=
[8.0, 12.0, 16.0, 20.0], p_drop=0.1):
super(Str2MSANew, self).__init__()
self.distbin = distbin
n_att_head = len(distbin)
self.norm_state = LayerNorm(d_state)
self.norm1 = LayerNorm(d_msa)
self.attn = MaskedDirectMultiheadAttention(d_state, d_msa,
n_att_head, d_k=inner_dim, dropout=p_drop)
self.dropout1 = nn.Dropout(p_drop, inplace=True)
self.norm2 = LayerNorm(d_msa)
self.ff = FeedForwardLayer(d_msa, d_msa * r_ff, p_drop=p_drop)
self.dropout2 = nn.Dropout(p_drop, inplace=True)
def forward(self, input_0, input_1, input_2):
primals_3 = self.norm_state.a_2
primals_4 = self.norm_state.b_2
primals_6 = self.norm1.a_2
primals_7 = self.norm1.b_2
primals_8 = self.attn.to_query.weight
primals_9 = self.attn.to_query.bias
primals_10 = self.attn.to_key.weight
primals_11 = self.attn.to_key.bias
primals_12 = self.attn.to_value.weight
primals_13 = self.attn.to_value.bias
primals_14 = self.attn.to_out.weight
primals_15 = self.attn.to_out.bias
primals_16 = self.norm2.a_2
primals_17 = self.norm2.b_2
primals_18 = self.ff.linear1.weight
primals_19 = self.ff.linear1.bias
primals_20 = self.ff.linear2.weight
primals_21 = self.ff.linear2.bias
primals_5 = input_0
primals_1 = input_1
primals_2 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
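# The compiled wrapper above flattens every module parameter plus the three
# runtime inputs into the positional primals_* list expected by call();
# output[0] is the updated msa tensor.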
| wukevin/RoseTTAFold | Str2MSA | false | 4,590 | [
"MIT"
] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super().__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps
)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, d_ff, p_drop=0.1):
super().__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(p_drop, inplace=True)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, src):
src = self.linear2(self.dropout(F.relu_(self.linear1(src))))
return src
class MaskedDirectMultiheadAttention(nn.Module):
def __init__(self, d_in, d_out, heads, d_k=32, dropout=0.1):
super().__init__()
self.heads = heads
self.scaling = 1 / math.sqrt(d_k)
self.to_query = nn.Linear(d_in, heads * d_k)
self.to_key = nn.Linear(d_in, heads * d_k)
self.to_value = nn.Linear(d_out, d_out)
self.to_out = nn.Linear(d_out, d_out)
self.dropout = nn.Dropout(dropout, inplace=True)
def forward(self, query, key, value, mask):
batch, N, L = value.shape[:3]
q = self.to_query(query).view(batch, L, self.heads, -1).permute(0,
2, 1, 3)
k = self.to_key(key).view(batch, L, self.heads, -1).permute(0, 2, 1, 3)
v = self.to_value(value).view(batch, N, L, self.heads, -1).permute(
0, 3, 1, 2, 4)
q = q * self.scaling
attention = torch.matmul(q, k.transpose(-2, -1))
attention = attention.masked_fill(mask < 0.5, torch.finfo(q.dtype).min)
attention = F.softmax(attention, dim=-1)
attention = self.dropout(attention)
out = torch.einsum('bhij,bhnjk->bhnik', attention, v)
out = out.permute(0, 2, 3, 1, 4).contiguous().view(batch, N, L, -1)
out = self.to_out(out)
return out
class Model(nn.Module):
def __init__(self, d_msa=64, d_state=32, inner_dim=32, r_ff=4, distbin=
[8.0, 12.0, 16.0, 20.0], p_drop=0.1):
super().__init__()
self.distbin = distbin
n_att_head = len(distbin)
self.norm_state = LayerNorm(d_state)
self.norm1 = LayerNorm(d_msa)
self.attn = MaskedDirectMultiheadAttention(d_state, d_msa,
n_att_head, d_k=inner_dim, dropout=p_drop)
self.dropout1 = nn.Dropout(p_drop, inplace=True)
self.norm2 = LayerNorm(d_msa)
self.ff = FeedForwardLayer(d_msa, d_msa * r_ff, p_drop=p_drop)
self.dropout2 = nn.Dropout(p_drop, inplace=True)
def forward(self, msa, xyz, state):
dist = torch.cdist(xyz[:, :, 1], xyz[:, :, 1])
mask_s = list()
for distbin in self.distbin:
mask_s.append(1.0 - torch.sigmoid(dist - distbin))
mask_s = torch.stack(mask_s, dim=1)
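        # Each mask is a soft indicator per distance cutoff: near 1 when dist
        # is well below distbin, 0.5 at dist == distbin, near 0 far beyond it,
        # so each attention head attends within one distance bin.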
state = self.norm_state(state)
msa2 = self.norm1(msa)
msa2 = self.attn(state, state, msa2, mask_s)
msa = msa + self.dropout1(msa2)
msa2 = self.norm2(msa)
msa2 = self.ff(msa2)
msa = msa + self.dropout2(msa2)
return msa
def get_inputs():
return [torch.rand([4, 4, 4, 64]), torch.rand([4, 4, 4, 4]), torch.rand
([4, 4, 4, 32])]
def get_init_inputs():
return []
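if __name__ == "__main__":
    # Hedged usage sketch (not part of the original harness): the output keeps
    # the msa shape.
    msa, xyz, state = get_inputs()
    model = Model()
    print(model(msa, xyz, state).shape)  # torch.Size([4, 4, 4, 64])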
|
LogitBinaryCrossEntropy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/w6/cw6fz5s67xdzo5cx2rd5szgts4uezpgfkawkkrijlxp626dz4bl6.py
# Topologically Sorted Source Nodes: [loss, loss_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul]
# Source node to ATen node mapping:
# loss => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2
# loss_1 => mul_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 4), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_mul_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = 4.0
tmp19 = tmp17 * tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
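# Hedged note on the fused kernel above: inductor rewrites
# binary_cross_entropy_with_logits into the numerically stable form
#   loss = (1 - t) * x - (min(0, x) - log1p(exp(-|x|)))
# using min(0, x) - log1p(exp(-|x|)) == log(sigmoid(x)); the trailing * 4 is
# the `loss * target_score.size(1)` scaling from the source module.
def _bce_with_logits_reference(x, t):
    # Elementwise sketch matching the kernel's decomposition (torch tensors;
    # torch is imported at the top of this file).
    return (1 - t) * x - (x.clamp(max=0.0) - (-x.abs()).exp().log1p())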
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [loss, loss_1], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mul_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class LogitBinaryCrossEntropy(nn.Module):
def __init__(self):
super(LogitBinaryCrossEntropy, self).__init__()
def forward(self, pred_score, target_score, weights=None):
        loss = F.binary_cross_entropy_with_logits(pred_score, target_score,
            reduction='mean')
loss = loss * target_score.size(1)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
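if __name__ == "__main__":
    # Hedged usage sketch (not part of the original harness): the result is the
    # mean BCE-with-logits scaled by target.size(1) == 4 for these inputs.
    model = LogitBinaryCrossEntropy()
    pred, target = get_inputs()
    print(model(pred, target))  # scalar tensor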
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_mul_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = 4.0
tmp19 = tmp17 * tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_mul_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class LogitBinaryCrossEntropyNew(nn.Module):
def __init__(self):
super(LogitBinaryCrossEntropyNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| xymtxwd/OSDA_with_soft_rejection | LogitBinaryCrossEntropy | false | 4,591 | [
"MIT"
] | 0 | a71394ae755c663508b33d3dddb1204ce7cb3fc0 | https://github.com/xymtxwd/OSDA_with_soft_rejection/tree/a71394ae755c663508b33d3dddb1204ce7cb3fc0 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred_score, target_score, weights=None):
        loss = F.binary_cross_entropy_with_logits(pred_score, target_score,
            reduction='mean')
loss = loss * target_score.size(1)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Conv2dSame | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xs/cxs2a7zwcw5yxvn445xldhvii7772mtsthpxnfawxoahvyf3vtaj.py
# Topologically Sorted Source Nodes: [input_], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# input_ => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_3, [1, 2, 1, 2], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 7) % 7
x0 = xindex % 7
x2 = (xindex // 49)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_3, buf0, 784, grid=grid(784), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf2, primals_1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.functional as F
from typing import Optional
from typing import Tuple
import torch.nn.parallel
import torch.optim
def _calc_same_pad(input_: 'int', kernel: 'int', stride: 'int', dilation: 'int'
):
"""calculate same padding"""
return max((-(input_ // -stride) - 1) * stride + (kernel - 1) *
dilation + 1 - input_, 0)
def conv2d_same(input_, weight: 'torch.Tensor', bias:
'Optional[torch.Tensor]'=None, stride: 'Tuple[int, int]'=(1, 1),
padding: 'Tuple[int, int]'=(0, 0), dilation: 'Tuple[int, int]'=(1, 1),
groups: 'int'=1):
"""conv2d with same padding"""
input_height, input_width = input_.size()[-2:]
kernel_height, kernel_width = weight.size()[-2:]
pad_h = _calc_same_pad(input_height, kernel_height, stride[0], dilation[0])
pad_w = _calc_same_pad(input_width, kernel_width, stride[1], dilation[1])
input_ = F.pad(input_, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2])
return F.conv2d(input_, weight, bias, stride, (0, 0), dilation, groups)
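# Worked example of the padding arithmetic (it matches the [1, 2, 1, 2]
# constant_pad_nd in the compiled kernel above): for input 4, kernel 4,
# stride 1, dilation 1,
#   pad = (ceil(4 / 1) - 1) * 1 + (4 - 1) * 1 + 1 - 4 = 3,
# split as pad // 2 = 1 on the left/top and 3 - 1 = 2 on the right/bottom.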
class Conv2dSame(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
def forward(self, input_):
return conv2d_same(input_, self.weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.functional as F
from typing import Optional
from typing import Tuple
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(784)](primals_3, buf0, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf2, primals_1, buf0
def _calc_same_pad(input_: 'int', kernel: 'int', stride: 'int', dilation: 'int'
):
"""calculate same padding"""
return max((-(input_ // -stride) - 1) * stride + (kernel - 1) *
dilation + 1 - input_, 0)
def conv2d_same(input_, weight: 'torch.Tensor', bias:
'Optional[torch.Tensor]'=None, stride: 'Tuple[int, int]'=(1, 1),
padding: 'Tuple[int, int]'=(0, 0), dilation: 'Tuple[int, int]'=(1, 1),
groups: 'int'=1):
"""conv2d with same padding"""
input_height, input_width = input_.size()[-2:]
kernel_height, kernel_width = weight.size()[-2:]
pad_h = _calc_same_pad(input_height, kernel_height, stride[0], dilation[0])
pad_w = _calc_same_pad(input_width, kernel_width, stride[1], dilation[1])
input_ = F.pad(input_, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2])
return F.conv2d(input_, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSameNew(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| xmyyzy123/zen_nas | Conv2dSame | false | 4,592 | [
"Apache-2.0"
] | 0 | 4870eb0a030856bd67afe8529f65af8dc3bd81dc | https://github.com/xmyyzy123/zen_nas/tree/4870eb0a030856bd67afe8529f65af8dc3bd81dc | import torch
import torch.utils.data
import torch.utils.data.distributed
from torch import nn
import torch.nn.functional as F
from typing import Optional
from typing import Tuple
import torch.nn.parallel
import torch.optim
def _calc_same_pad(input_: 'int', kernel: 'int', stride: 'int', dilation: 'int'
):
"""calculate same padding"""
return max((-(input_ // -stride) - 1) * stride + (kernel - 1) *
dilation + 1 - input_, 0)
def conv2d_same(input_, weight: 'torch.Tensor', bias:
'Optional[torch.Tensor]'=None, stride: 'Tuple[int, int]'=(1, 1),
padding: 'Tuple[int, int]'=(0, 0), dilation: 'Tuple[int, int]'=(1, 1),
groups: 'int'=1):
"""conv2d with same padding"""
input_height, input_width = input_.size()[-2:]
kernel_height, kernel_width = weight.size()[-2:]
pad_h = _calc_same_pad(input_height, kernel_height, stride[0], dilation[0])
pad_w = _calc_same_pad(input_width, kernel_width, stride[1], dilation[1])
input_ = F.pad(input_, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2])
return F.conv2d(input_, weight, bias, stride, (0, 0), dilation, groups)
class Model(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
def forward(self, input_):
return conv2d_same(input_, self.weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
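if __name__ == "__main__":
    # Hedged usage sketch (not part of the original harness): 'SAME' padding
    # preserves the spatial dims for stride 1.
    model = Model(*get_init_inputs())
    out = model(*get_inputs())
    print(out.shape)  # torch.Size([4, 4, 4, 4])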
|
DirectEncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ml/cml5quc3cicwyzfl3nbiu7tgo6bcqkqozy7yi44fbmbeevdan72v.py
# Topologically Sorted Source Nodes: [add, src, mean, var], Original ATen: [aten.add, aten.mul, aten.mean, aten.var]
# Source node to ATen node mapping:
# add => add
# mean => mean
# src => mul
# var => var
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %permute), kwargs = {})
# %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%mul, [-1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_mean_mul_var_0 = async_compile.triton('triton_poi_fused_add_mean_mul_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_var_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_var_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + ((4*x1) + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1) + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x1) + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x1) + (16*x0) + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 * tmp3
tmp9 = tmp4 + tmp8
tmp12 = tmp10 + tmp11
tmp13 = tmp12 * tmp3
tmp14 = tmp9 + tmp13
tmp17 = tmp15 + tmp16
tmp18 = tmp17 * tmp3
tmp19 = tmp14 + tmp18
tmp20 = 4.0
tmp21 = tmp19 / tmp20
tmp22 = tmp4 - tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp8 - tmp21
tmp25 = tmp24 * tmp24
tmp26 = tmp23 + tmp25
tmp27 = tmp13 - tmp21
tmp28 = tmp27 * tmp27
tmp29 = tmp26 + tmp28
tmp30 = tmp18 - tmp21
tmp31 = tmp30 * tmp30
tmp32 = tmp29 + tmp31
tmp33 = tmp32 / tmp20
tl.store(in_out_ptr0 + (x3), tmp33, xmask)
tl.store(out_ptr0 + (x3), tmp21, xmask)
''', device_str='cuda')
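# Hedged reference for the kernel above (a sketch; judging from the strided
# loads, the permute swaps dims 1 and 2 of the (4, 4, 4, 4) input):
#   src = 0.5 * (x + x.transpose(1, 2))
#   mean = src.mean(-1, keepdim=True)                 # written to out_ptr0
#   var = src.var(-1, unbiased=False, keepdim=True)   # written to in_out_ptr0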
# kernel path: runs/run_shard_7/inductor_cache/6x/c6x6aspyrf7ugqpqiv4vpyxkbcax7qwipmhc5xui6nshlqdkb32m.py
# Topologically Sorted Source Nodes: [mean_2, var_1, add_2, std_1, sub_1, x_3, x_4, x_5], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add_2 => add_3
# mean_2 => mean_1
# std_1 => sqrt_1
# sub_1 => sub_1
# var_1 => var_1
# x_3 => mul_2
# x_4 => div_1
# x_5 => add_4
# Graph fragment:
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 0, keepdim: True})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var_1, 1e-05), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_3,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %sub_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, %sqrt_1), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, %primals_6), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_1 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_var_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_var_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tmp23 / tmp9
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp12 / tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ue/cueddqtdpwf2p7c6f7arydjzzs6ehyu7hxf7on6kvogd2pwyf2mh.py
# Topologically Sorted Source Nodes: [add, src, mean, add_1, std, sub, x, x_1, x_2], Original ATen: [aten.add, aten.mul, aten.mean, aten.sqrt, aten.sub, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mean => mean
# src => mul
# std => sqrt
# sub => sub
# x => mul_1
# x_1 => div
# x_2 => add_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %permute), kwargs = {})
# %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [-1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mean), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sub), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %sqrt), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_4), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x5 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask)
tmp2 = tl.load(in_ptr1 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp6 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp8 = tmp0 * tmp7
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp8 / tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x4), tmp15, xmask)
''', device_str='cuda')
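# Hedged note: the kernel above normalizes the symmetrized pair features
# 0.5 * (src + src.permute(0, 2, 1, 3)), reusing the per-row mean (in_ptr2) and
# biased variance (in_ptr3) produced by an earlier kernel, then applies the
# LayerNorm scale (in_ptr0) and shift (in_ptr4).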
# kernel path: runs/run_shard_7/inductor_cache/ow/cowwhsc5cs2qznzwwjc3lfnep2kbdduqlytkv5dxjwuphmtmuvmg.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub_2
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
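# Hedged reference (illustrative): the kernel above computes only the
# numerically stable softmax numerator, exp(x - max(x, dim=1)); the normalizing
# division is folded into triton_poi_fused_clone_5 below.
def _softmax_numerator_reference(x):
    return torch.exp(x - x.amax(dim=1, keepdim=True))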
# kernel path: runs/run_shard_7/inductor_cache/7n/c7nnilyo4q6dbpsdqzfxx6egbyaircr6jxypw4w5w6idrxgklgkm.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_4,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
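# Hedged note: the kernel above adds the projection bias (in_ptr1) while
# permuting the projected values into the contiguous layout the batched matmul
# (extern_kernels.bmm) expects.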
# kernel path: runs/run_shard_7/inductor_cache/yq/cyqrw5qdv667sesxsni3ehsv3wipz7tv4sr5jiwwgel4j7hbuij4.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x4 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
x2 = xindex % 4
y5 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x4) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + y0 + (4*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x4 + (16*y5)), tmp8, xmask & ymask)
''', device_str='cuda')
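# Hedged note: the kernel above finishes the softmax started in
# triton_poi_fused__softmax_3 by dividing each exponential by the row sum,
# while also laying the attention map out for the batched matmul.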
# kernel path: runs/run_shard_7/inductor_cache/f5/cf54cp2yj2nqsdqclstksyuxqtb64paty3icoqv6sr4eroweijif.py
# Topologically Sorted Source Nodes: [tgt_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# tgt_1 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
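# Hedged note: the kernel above materializes the permute of the attention
# result contiguously so the output projection can run as a plain
# (64, 4) x (4, 4) matmul.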
# kernel path: runs/run_shard_7/inductor_cache/2p/c2p43ejeskhy446eg25namhtbbtso52ztjc25eznzg53lifxnvs5.py
# Topologically Sorted Source Nodes: [mean_4, var_2], Original ATen: [aten.mean, aten.var]
# Source node to ATen node mapping:
# mean_4 => mean_2
# var_2 => var_2
# Graph fragment:
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_13, [-1], True), kwargs = {})
# %var_2 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view_13, [-1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_mean_var_7 = async_compile.triton('triton_poi_fused_mean_var_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_var_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_var_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(in_out_ptr0 + (x0), tmp40, xmask)
''', device_str='cuda')
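# Hedged reference (illustrative): the kernel above fuses the residual add
# tgt + (attn_out + bias) with the per-row mean and biased variance over the
# last dimension needed by the following LayerNorm.
def _fused_residual_stats_reference(tgt, attn_out, bias):
    x = tgt + (attn_out + bias)
    return x.mean(-1, keepdim=True), x.var(-1, unbiased=False, keepdim=True)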
# kernel path: runs/run_shard_7/inductor_cache/2u/c2u66h2ot625qdzyd3nlvceuwf72o5h7ohs2fiamn65g2xgukloy.py
# Topologically Sorted Source Nodes: [add_4, std_2, sub_2, x_6, x_7, x_8], Original ATen: [aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add_4 => add_7
# std_2 => sqrt_2
# sub_2 => sub_3
# x_6 => mul_3
# x_7 => div_3
# x_8 => add_8
# Graph fragment:
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var_2, 1e-05), kwargs = {})
# %sqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_7,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_13, %mean_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_13, %sub_3), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, %sqrt_2), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_3, %primals_14), kwargs = {})
triton_poi_fused_add_div_mul_sqrt_sub_8 = async_compile.triton('triton_poi_fused_add_div_mul_sqrt_sub_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sqrt_sub_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sqrt_sub_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp3 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = tmp1 + tmp4
tmp7 = tmp5 - tmp6
tmp8 = tmp0 * tmp7
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp8 / tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
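# Hedged note: the kernel above re-materializes the same residual sum and
# normalizes it with the statistics produced by triton_poi_fused_mean_var_7,
# then applies the affine scale (in_ptr0) and shift (in_ptr6).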
# kernel path: runs/run_shard_7/inductor_cache/5s/c5stk5glscu2vhkyygzl37kwox56bq47ibhaq46siyunrac2divm.py
# Topologically Sorted Source Nodes: [relu_], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu_ => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_18,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_23, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
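# Hedged reference (illustrative): the kernel above fuses the bias add and
# in-place ReLU of the first feed-forward linear with the boolean mask
# (y <= 0) that aten.threshold_backward needs during the backward pass.
def _relu_with_backward_mask_reference(x, bias):
    y = torch.relu(x + bias)
    return y, y <= 0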
# kernel path: runs/run_shard_7/inductor_cache/cs/ccsko6axmghf5opun7oldfuzwck2miyrvybdwzv5jxatbhz5oca4.py
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.view]
# Source node to ATen node mapping:
# src_1 => view_24
# Graph fragment:
# %view_24 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_23, [64, 4]), kwargs = {})
triton_poi_fused_view_10 = async_compile.triton('triton_poi_fused_view_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
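# Hedged note: for these fixed (4, 4, 4, 4) shapes the index arithmetic in the
# kernel above simplifies to x0 + 4 * x1, i.e. an identity copy into a plain
# (64, 4) layout for the second feed-forward matmul.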
# kernel path: runs/run_shard_7/inductor_cache/3n/c3nfizw2ntzxzaq4lgbgvecmi2qgnixh7vzgsb34mjjrfelxsr22.py
# Topologically Sorted Source Nodes: [tgt_1, tgt_2, tgt_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# tgt_1 => add_5
# tgt_2 => add_6
# tgt_3 => add_9
# Graph fragment:
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_12, %primals_12), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_5), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %view_25), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
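# Hedged note: the kernel above fuses both residual connections of the encoder
# layer: out = tgt + (attn_out + out_proj_bias) + (ff_out + linear2_bias),
# accumulating in place over the feed-forward output buffer.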
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [add, src, mean, var], Original ATen: [aten.add, aten.mul, aten.mean, aten.var]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_mul_var_0.run(buf1, primals_2, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_2, var_1, add_2, std_1, sub_1, x_3, x_4, x_5], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_1.run(primals_5, primals_1, primals_6, buf3, 256, grid=grid(256), stream=stream0)
del primals_5
del primals_6
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, src, mean, add_1, std, sub, x, x_1, x_2], Original ATen: [aten.add, aten.mul, aten.mean, aten.sqrt, aten.sub, aten.div]
triton_poi_fused_add_div_mean_mul_sqrt_sub_2.run(primals_3, primals_2, buf2, buf1, primals_4, buf4, 256, grid=grid(256), stream=stream0)
del primals_3
del primals_4
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf7, primals_10, buf8, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_10
buf9 = reinterpret_tensor(buf7, (4, 1, 4, 4, 4), (64, 1, 16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf6, buf9, 16, 16, grid=grid(16, 16), stream=stream0)
buf10 = reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt_1], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf10, buf11, 64, 4, grid=grid(64, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [tgt_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf12)
buf13 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0); del buf2 # reuse
buf14 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 64), 0); del buf1 # reuse
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [mean_4, var_2], Original ATen: [aten.mean, aten.var]
triton_poi_fused_mean_var_7.run(buf15, primals_1, buf12, primals_12, buf13, 64, grid=grid(64), stream=stream0)
buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_4, std_2, sub_2, x_6, x_7, x_8], Original ATen: [aten.add, aten.sqrt, aten.sub, aten.mul, aten.div]
triton_poi_fused_add_div_mul_sqrt_sub_8.run(primals_13, primals_1, buf12, primals_12, buf13, buf15, primals_14, buf16, 256, grid=grid(256), stream=stream0)
del buf13
del buf15
del primals_14
buf17 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf17)
buf18 = reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf17 # reuse
buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu_], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_9.run(buf18, primals_16, buf22, 256, grid=grid(256), stream=stream0)
del primals_16
buf19 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.view]
triton_poi_fused_view_10.run(buf18, buf19, 256, grid=grid(256), stream=stream0)
buf20 = reinterpret_tensor(buf18, (64, 4), (4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf19, reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf20)
buf21 = reinterpret_tensor(buf20, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf20 # reuse
# Topologically Sorted Source Nodes: [tgt_1, tgt_2, tgt_3], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf21, primals_1, buf12, primals_12, primals_18, 256, grid=grid(256), stream=stream0)
del primals_18
return (buf21, primals_1, primals_2, primals_12, primals_13, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), buf5, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf11, (64, 4), (4, 1), 0), buf12, reinterpret_tensor(buf16, (64, 4), (4, 1), 0), buf19, primals_17, buf22, primals_15, primals_11, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0), primals_9, primals_7, )
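# Hedged note: in the tuple returned above, the first tensor is the layer
# output (tgt_3); the remaining tensors are activations and parameters that
# Inductor saves so a backward pass can be generated without recomputation.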
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps
)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
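# Hedged equivalence check (illustrative; not in the original source): with
# unbiased=False statistics this matches torch.nn.LayerNorm on the last dim:
# >>> x = torch.randn(2, 3, 4)
# >>> torch.allclose(LayerNorm(4)(x), nn.LayerNorm(4, eps=1e-05)(x), atol=1e-6)
# True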
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, d_ff, p_drop=0.1):
super(FeedForwardLayer, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(p_drop, inplace=True)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, src):
src = self.linear2(self.dropout(F.relu_(self.linear1(src))))
return src
class DirectMultiheadAttention(nn.Module):
def __init__(self, d_in, d_out, heads, dropout=0.1):
super(DirectMultiheadAttention, self).__init__()
self.heads = heads
self.proj_pair = nn.Linear(d_in, heads)
self.drop = nn.Dropout(dropout, inplace=True)
self.proj_msa = nn.Linear(d_out, d_out)
self.proj_out = nn.Linear(d_out, d_out)
def forward(self, src, tgt):
B, N, L = tgt.shape[:3]
attn_map = F.softmax(self.proj_pair(src), dim=1).permute(0, 3, 1, 2)
attn_map = self.drop(attn_map).unsqueeze(1)
value = self.proj_msa(tgt).permute(0, 3, 1, 2).contiguous().view(B,
-1, self.heads, N, L)
tgt = torch.matmul(value, attn_map).view(B, -1, N, L).permute(0, 2,
3, 1)
tgt = self.proj_out(tgt)
return tgt
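# Hedged shape trace (assuming src: (B, L, L, d_in) pair features and
# tgt: (B, N, L, d_out)): attn_map becomes (B, 1, heads, L, L), value becomes
# (B, d_out // heads, heads, N, L), and the matmul broadcasts over dim 1 while
# contracting the trailing L axis, returning tgt in (B, N, L, d_out) layout.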
class DirectEncoderLayer(nn.Module):
def __init__(self, heads, d_in, d_out, d_ff, symmetrize=True, p_drop=0.1):
super(DirectEncoderLayer, self).__init__()
self.symmetrize = symmetrize
self.attn = DirectMultiheadAttention(d_in, d_out, heads, dropout=p_drop
)
self.ff = FeedForwardLayer(d_out, d_ff, p_drop=p_drop)
self.drop_1 = nn.Dropout(p_drop, inplace=True)
self.drop_2 = nn.Dropout(p_drop, inplace=True)
self.norm = LayerNorm(d_in)
self.norm1 = LayerNorm(d_out)
self.norm2 = LayerNorm(d_out)
def forward(self, src, tgt):
B, N, L = tgt.shape[:3]
if self.symmetrize:
src = 0.5 * (src + src.permute(0, 2, 1, 3))
src = self.norm(src)
tgt2 = self.norm1(tgt)
tgt2 = self.attn(src, tgt2)
tgt = tgt + self.drop_1(tgt2)
tgt2 = self.norm2(tgt.view(B * N, L, -1)).view(B, N, L, -1)
tgt2 = self.ff(tgt2)
tgt = tgt + self.drop_2(tgt2)
return tgt
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'heads': 4, 'd_in': 4, 'd_out': 4, 'd_ff': 4}]
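# Hedged usage sketch (eager-mode smoke test; not part of the original source):
# layer = DirectEncoderLayer(*get_init_inputs()[0], **get_init_inputs()[1])
# src, tgt = get_inputs()
# out = layer(src, tgt)  # expected to keep tgt's shape: (4, 4, 4, 4)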
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mean_mul_var_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 * x1 + 16 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1 + 16 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (2 + 4 * x1 + 16 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x1 + 16 * x0 + 64 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 * tmp3
tmp9 = tmp4 + tmp8
tmp12 = tmp10 + tmp11
tmp13 = tmp12 * tmp3
tmp14 = tmp9 + tmp13
tmp17 = tmp15 + tmp16
tmp18 = tmp17 * tmp3
tmp19 = tmp14 + tmp18
tmp20 = 4.0
tmp21 = tmp19 / tmp20
tmp22 = tmp4 - tmp21
tmp23 = tmp22 * tmp22
tmp24 = tmp8 - tmp21
tmp25 = tmp24 * tmp24
tmp26 = tmp23 + tmp25
tmp27 = tmp13 - tmp21
tmp28 = tmp27 * tmp27
tmp29 = tmp26 + tmp28
tmp30 = tmp18 - tmp21
tmp31 = tmp30 * tmp30
tmp32 = tmp29 + tmp31
tmp33 = tmp32 / tmp20
tl.store(in_out_ptr0 + x3, tmp33, xmask)
tl.store(out_ptr0 + x3, tmp21, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = tmp23 / tmp9
tmp25 = 1e-05
tmp26 = tmp24 + tmp25
tmp27 = libdevice.sqrt(tmp26)
tmp28 = tmp12 / tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp2 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 0.5
tmp5 = tmp3 * tmp4
tmp7 = tmp5 - tmp6
tmp8 = tmp0 * tmp7
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp8 / tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x4, tmp15, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x4 = xindex
y0 = yindex % 4
y1 = yindex // 4
x2 = xindex % 4
y5 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x4 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + y0 + 4 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x4 + 16 * y5), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_mean_var_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 2)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(in_out_ptr0 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sqrt_sub_8(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = tmp1 + tmp4
tmp7 = tmp5 - tmp6
tmp8 = tmp0 * tmp7
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp8 / tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_var_0[grid(64)](buf1, primals_2, buf2,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_var_1[grid(256)](primals_5,
primals_1, primals_6, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_5
del primals_6
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_2[grid(256)](primals_3,
primals_2, buf2, buf1, primals_4, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
del primals_4
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf4, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 16)](buf7, primals_10, buf8, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_10
buf9 = reinterpret_tensor(buf7, (4, 1, 4, 4, 4), (64, 1, 16, 4, 1), 0)
del buf7
triton_poi_fused_clone_5[grid(16, 16)](buf6, buf9, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0)
del buf6
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_6[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf12)
buf13 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 64), 0)
del buf2
buf14 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 64), 0)
del buf1
buf15 = buf14
del buf14
triton_poi_fused_mean_var_7[grid(64)](buf15, primals_1, buf12,
primals_12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_sqrt_sub_8[grid(256)](primals_13,
primals_1, buf12, primals_12, buf13, buf15, primals_14, buf16,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf13
del buf15
del primals_14
buf17 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf17)
buf18 = reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf17
buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_9[grid(256)](buf18,
primals_16, buf22, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_16
buf19 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_10[grid(256)](buf18, buf19, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf20 = reinterpret_tensor(buf18, (64, 4), (4, 1), 0)
del buf18
extern_kernels.mm(buf19, reinterpret_tensor(primals_17, (4, 4), (1,
4), 0), out=buf20)
buf21 = reinterpret_tensor(buf20, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf20
triton_poi_fused_add_11[grid(256)](buf21, primals_1, buf12,
primals_12, primals_18, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_18
return (buf21, primals_1, primals_2, primals_12, primals_13,
reinterpret_tensor(buf4, (64, 4), (4, 1), 0), buf5,
reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(
buf11, (64, 4), (4, 1), 0), buf12, reinterpret_tensor(buf16, (64, 4
), (4, 1), 0), buf19, primals_17, buf22, primals_15, primals_11,
reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0), primals_9,
primals_7)
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps
)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, d_ff, p_drop=0.1):
super(FeedForwardLayer, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(p_drop, inplace=True)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, src):
src = self.linear2(self.dropout(F.relu_(self.linear1(src))))
return src
class DirectMultiheadAttention(nn.Module):
def __init__(self, d_in, d_out, heads, dropout=0.1):
super(DirectMultiheadAttention, self).__init__()
self.heads = heads
self.proj_pair = nn.Linear(d_in, heads)
self.drop = nn.Dropout(dropout, inplace=True)
self.proj_msa = nn.Linear(d_out, d_out)
self.proj_out = nn.Linear(d_out, d_out)
def forward(self, src, tgt):
B, N, L = tgt.shape[:3]
attn_map = F.softmax(self.proj_pair(src), dim=1).permute(0, 3, 1, 2)
attn_map = self.drop(attn_map).unsqueeze(1)
value = self.proj_msa(tgt).permute(0, 3, 1, 2).contiguous().view(B,
-1, self.heads, N, L)
tgt = torch.matmul(value, attn_map).view(B, -1, N, L).permute(0, 2,
3, 1)
tgt = self.proj_out(tgt)
return tgt
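# Shape sketch for DirectMultiheadAttention.forward (assuming src of shape
# (B, L, L, d_in) and tgt of shape (B, N, L, d_out), as in the driver below):
# proj_pair yields (B, L, L, heads) logits, softmax over dim=1 normalizes the
# first L axis, and after the permute/unsqueeze the attention map is
# (B, 1, heads, L, L). proj_msa reshapes tgt to (B, d_out // heads, heads, N, L),
# so the batched matmul mixes positions per head before proj_out projects back.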
class DirectEncoderLayerNew(nn.Module):
def __init__(self, heads, d_in, d_out, d_ff, symmetrize=True, p_drop=0.1):
super(DirectEncoderLayerNew, self).__init__()
self.symmetrize = symmetrize
self.attn = DirectMultiheadAttention(d_in, d_out, heads, dropout=p_drop
)
self.ff = FeedForwardLayer(d_out, d_ff, p_drop=p_drop)
self.drop_1 = nn.Dropout(p_drop, inplace=True)
self.drop_2 = nn.Dropout(p_drop, inplace=True)
self.norm = LayerNorm(d_in)
self.norm1 = LayerNorm(d_out)
self.norm2 = LayerNorm(d_out)
def forward(self, input_0, input_1):
primals_7 = self.attn.proj_pair.weight
primals_3 = self.attn.proj_pair.bias
primals_9 = self.attn.proj_msa.weight
primals_4 = self.attn.proj_msa.bias
primals_11 = self.attn.proj_out.weight
primals_5 = self.attn.proj_out.bias
primals_15 = self.ff.linear1.weight
primals_6 = self.ff.linear1.bias
primals_17 = self.ff.linear2.weight
primals_8 = self.ff.linear2.bias
primals_10 = self.norm.a_2
primals_12 = self.norm.b_2
primals_13 = self.norm1.a_2
primals_14 = self.norm1.b_2
primals_16 = self.norm2.a_2
primals_18 = self.norm2.b_2
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0]
| wukevin/RoseTTAFold | DirectEncoderLayer | false | 4,593 | [
"MIT"
] | 0 | e3c15dbf4bc1e4f8726e26c63aca1625188da803 | https://github.com/wukevin/RoseTTAFold/tree/e3c15dbf4bc1e4f8726e26c63aca1625188da803 | import torch
import torch.nn as nn
import torch.nn.functional as F
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-05):
super().__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = torch.sqrt(x.var(dim=-1, keepdim=True, unbiased=False) + self.eps
)
x = self.a_2 * (x - mean)
x /= std
x += self.b_2
return x
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, d_ff, p_drop=0.1):
super().__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(p_drop, inplace=True)
self.linear2 = nn.Linear(d_ff, d_model)
def forward(self, src):
src = self.linear2(self.dropout(F.relu_(self.linear1(src))))
return src
class DirectMultiheadAttention(nn.Module):
def __init__(self, d_in, d_out, heads, dropout=0.1):
super().__init__()
self.heads = heads
self.proj_pair = nn.Linear(d_in, heads)
self.drop = nn.Dropout(dropout, inplace=True)
self.proj_msa = nn.Linear(d_out, d_out)
self.proj_out = nn.Linear(d_out, d_out)
def forward(self, src, tgt):
B, N, L = tgt.shape[:3]
attn_map = F.softmax(self.proj_pair(src), dim=1).permute(0, 3, 1, 2)
attn_map = self.drop(attn_map).unsqueeze(1)
value = self.proj_msa(tgt).permute(0, 3, 1, 2).contiguous().view(B,
-1, self.heads, N, L)
tgt = torch.matmul(value, attn_map).view(B, -1, N, L).permute(0, 2,
3, 1)
tgt = self.proj_out(tgt)
return tgt
class Model(nn.Module):
def __init__(self, heads, d_in, d_out, d_ff, symmetrize=True, p_drop=0.1):
super().__init__()
self.symmetrize = symmetrize
self.attn = DirectMultiheadAttention(d_in, d_out, heads, dropout=p_drop
)
self.ff = FeedForwardLayer(d_out, d_ff, p_drop=p_drop)
self.drop_1 = nn.Dropout(p_drop, inplace=True)
self.drop_2 = nn.Dropout(p_drop, inplace=True)
self.norm = LayerNorm(d_in)
self.norm1 = LayerNorm(d_out)
self.norm2 = LayerNorm(d_out)
def forward(self, src, tgt):
B, N, L = tgt.shape[:3]
if self.symmetrize:
src = 0.5 * (src + src.permute(0, 2, 1, 3))
src = self.norm(src)
tgt2 = self.norm1(tgt)
tgt2 = self.attn(src, tgt2)
tgt = tgt + self.drop_1(tgt2)
tgt2 = self.norm2(tgt.view(B * N, L, -1)).view(B, N, L, -1)
tgt2 = self.ff(tgt2)
tgt = tgt + self.drop_2(tgt2)
return tgt
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 4]
|
SEModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')
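# The kernel above hand-unrolls a 4x4 average pool: each program sums the 16
# contiguous elements of one (n, c) plane and scales by 0.0625 (= 1/16),
# matching F.avg_pool2d with kernel_size equal to the full spatial extent.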
# kernel path: runs/run_shard_7/inductor_cache/ad/cadccuyhl7stcp3nyqfgohiwbiv5ckfzxsye27ithwsill6dvmh4.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
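# Fusion note: this kernel folds the convolution bias add and the ReLU into a
# single elementwise pass; in_out_ptr0 holds the conv output and is rewritten
# in place with max(conv + bias, 0).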
# kernel path: runs/run_shard_7/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lp/clprvnh5p6cmadxtwzizwydrpjlwxohxixbw4ntucp6srbu6gtis.py
# Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# x_4 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
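# Here x1 = xindex // 16 selects the (n, c) plane, so each per-channel
# excitation passes through sigmoid once and is broadcast across the 4x4
# spatial positions before multiplying the original activation.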
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 1, 1), (1, 1, 1, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 4, grid=grid(4), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf4, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_3.run(primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0)
return (buf5, primals_1, primals_2, primals_4, buf0, buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SEModule(nn.Module):
def __init__(self, planes, compress_rate):
super(SEModule, self).__init__()
self.conv1 = nn.Conv2d(planes, planes // compress_rate, kernel_size
=1, stride=1, bias=True)
self.conv2 = nn.Conv2d(planes // compress_rate, planes, kernel_size
=1, stride=1, bias=True)
self.relu = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = F.avg_pool2d(module_input, kernel_size=module_input.size(2))
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.sigmoid(x)
return module_input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'planes': 4, 'compress_rate': 4}]
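# Usage sketch (illustrative, mirroring get_inputs/get_init_inputs above):
#   se = SEModule(planes=4, compress_rate=4)
#   y = se(torch.rand(4, 4, 4, 4))  # same shape as the input, each channel
#                                   # rescaled by a sigmoid gate in (0, 1)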
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x0, tmp32, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK
=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 1, 1), (1, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(4)](buf2, primals_3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(16)](buf4, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf4, buf5,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf5, primals_1, primals_2, primals_4, buf0, buf2, buf4
class SEModuleNew(nn.Module):
def __init__(self, planes, compress_rate):
super(SEModuleNew, self).__init__()
self.conv1 = nn.Conv2d(planes, planes // compress_rate, kernel_size
=1, stride=1, bias=True)
self.conv2 = nn.Conv2d(planes // compress_rate, planes, kernel_size
=1, stride=1, bias=True)
self.relu = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
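# The compiled module reuses the eager parameters, so (assuming a CUDA device
# and the reference SEModule defined above being importable) the outputs
# should agree numerically; an illustrative check:
#   ref, new = SEModule(4, 4).cuda(), SEModuleNew(4, 4).cuda()
#   new.load_state_dict(ref.state_dict())
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   assert torch.allclose(ref(x), new(x), atol=1e-6)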
| xuehaouwa/VGGFace2-pytorch | SEModule | false | 4,594 | [
"MIT"
] | 0 | c38e11f893e5bcc273a9b847530cd619019b636c | https://github.com/xuehaouwa/VGGFace2-pytorch/tree/c38e11f893e5bcc273a9b847530cd619019b636c | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, planes, compress_rate):
super().__init__()
self.conv1 = nn.Conv2d(planes, planes // compress_rate, kernel_size
=1, stride=1, bias=True)
self.conv2 = nn.Conv2d(planes // compress_rate, planes, kernel_size
=1, stride=1, bias=True)
self.relu = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = F.avg_pool2d(module_input, kernel_size=module_input.size(2))
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.sigmoid(x)
return module_input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Upsample4x | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/5e/c5e5lzxp6mart5akee4vyatlzs4an37oi76thijue43evdlt4onb.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min, clamp_min_2, clamp_min_3, convert_element_type, convert_element_type_1, convert_element_type_3, iota, mul, mul_2, mul_3, mul_4, sub, sub_2, sub_3, sub_4, sub_5, sub_6
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.25), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256)
x3 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = x0
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp11 + tmp2
tmp13 = tmp12 * tmp4
tmp14 = tmp13 - tmp2
tmp15 = triton_helpers.maximum(tmp14, tmp7)
tmp16 = tmp15.to(tl.int32)
tmp17 = tl.load(in_ptr0 + (tmp16 + (4*tmp9) + (16*x2)), None, eviction_policy='evict_last')
tmp18 = tl.full([1], 1, tl.int64)
tmp19 = tmp16 + tmp18
tmp20 = tl.full([1], 3, tl.int64)
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (tmp21 + (4*tmp9) + (16*x2)), None, eviction_policy='evict_last')
tmp23 = tmp22 - tmp17
tmp24 = tmp16.to(tl.float32)
tmp25 = tmp15 - tmp24
tmp26 = triton_helpers.maximum(tmp25, tmp7)
tmp27 = 1.0
tmp28 = triton_helpers.minimum(tmp26, tmp27)
tmp29 = tmp23 * tmp28
tmp30 = tmp17 + tmp29
tmp31 = tmp9 + tmp18
tmp32 = triton_helpers.minimum(tmp31, tmp20)
tmp33 = tl.load(in_ptr0 + (tmp21 + (4*tmp32) + (16*x2)), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr0 + (tmp16 + (4*tmp32) + (16*x2)), None, eviction_policy='evict_last')
tmp35 = tmp33 - tmp34
tmp36 = tmp35 * tmp28
tmp37 = tmp34 + tmp36
tmp38 = tmp37 - tmp30
tmp39 = tmp9.to(tl.float32)
tmp40 = tmp8 - tmp39
tmp41 = triton_helpers.maximum(tmp40, tmp7)
tmp42 = triton_helpers.minimum(tmp41, tmp27)
tmp43 = tmp38 * tmp42
tmp44 = tmp30 + tmp43
tl.store(in_out_ptr0 + (x3), tmp44, None)
''', device_str='cuda')
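# The index math above implements the align_corners=False source mapping
# src = (dst + 0.5) * scale - 0.5 with scale = 4 / 16 = 0.25, clamped into
# [0, 3]; the kernel then interpolates along W first and blends the two
# neighboring rows along H.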
# kernel path: runs/run_shard_7/inductor_cache/k3/ck32bniqdx2lm3oqd744i6qcx3pdbr4dqljnsqayjqvotrywlsy6.py
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_6, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
buf2 = buf1; del buf1 # reuse
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf3, primals_1, 4096, grid=grid(4096), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 16, 16), (1024, 256, 16, 1))
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf5, primals_3, buf6, 4096, grid=grid(4096), stream=stream0)
del primals_3
return (buf5, primals_2, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Upsample4x(nn.Module):
def __init__(self, n_channels):
super(Upsample4x, self).__init__()
self.conv = nn.Conv2d(n_channels, n_channels, 3, 1, 1)
def forward(self, x):
x = torch.nn.functional.interpolate(x, scale_factor=4, mode=
'bilinear', align_corners=False)
x = torch.relu(self.conv(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4}]
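# Shape sketch: a (4, 4, 4, 4) input becomes (4, 4, 16, 16) after the 4x
# bilinear upsample, and the 3x3 / stride-1 / padding-1 conv keeps that size.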
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = x0
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp11 + tmp2
tmp13 = tmp12 * tmp4
tmp14 = tmp13 - tmp2
tmp15 = triton_helpers.maximum(tmp14, tmp7)
tmp16 = tmp15.to(tl.int32)
tmp17 = tl.load(in_ptr0 + (tmp16 + 4 * tmp9 + 16 * x2), None,
eviction_policy='evict_last')
tmp18 = tl.full([1], 1, tl.int64)
tmp19 = tmp16 + tmp18
tmp20 = tl.full([1], 3, tl.int64)
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (tmp21 + 4 * tmp9 + 16 * x2), None,
eviction_policy='evict_last')
tmp23 = tmp22 - tmp17
tmp24 = tmp16.to(tl.float32)
tmp25 = tmp15 - tmp24
tmp26 = triton_helpers.maximum(tmp25, tmp7)
tmp27 = 1.0
tmp28 = triton_helpers.minimum(tmp26, tmp27)
tmp29 = tmp23 * tmp28
tmp30 = tmp17 + tmp29
tmp31 = tmp9 + tmp18
tmp32 = triton_helpers.minimum(tmp31, tmp20)
tmp33 = tl.load(in_ptr0 + (tmp21 + 4 * tmp32 + 16 * x2), None,
eviction_policy='evict_last')
tmp34 = tl.load(in_ptr0 + (tmp16 + 4 * tmp32 + 16 * x2), None,
eviction_policy='evict_last')
tmp35 = tmp33 - tmp34
tmp36 = tmp35 * tmp28
tmp37 = tmp34 + tmp36
tmp38 = tmp37 - tmp30
tmp39 = tmp9.to(tl.float32)
tmp40 = tmp8 - tmp39
tmp41 = triton_helpers.maximum(tmp40, tmp7)
tmp42 = triton_helpers.minimum(tmp41, tmp27)
tmp43 = tmp38 * tmp42
tmp44 = tmp30 + tmp43
tl.store(in_out_ptr0 + x3, tmp44, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, None)
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
buf2 = buf1
del buf1
buf3 = buf2
del buf2
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(4096)](buf3, primals_1, 4096, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_1
buf4 = extern_kernels.convolution(buf3, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 16, 16), (1024, 256, 16, 1))
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(4096)](buf5
, primals_3, buf6, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf5, primals_2, buf3, buf6
class Upsample4xNew(nn.Module):
def __init__(self, n_channels):
super(Upsample4xNew, self).__init__()
self.conv = nn.Conv2d(n_channels, n_channels, 3, 1, 1)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| xqterry/lightweight-human-pose-estimation.pytorch | Upsample4x | false | 4,595 | [
"Apache-2.0"
] | 0 | e5ec9452c9bd9683451d3b2f97c6fe9e075b2d48 | https://github.com/xqterry/lightweight-human-pose-estimation.pytorch/tree/e5ec9452c9bd9683451d3b2f97c6fe9e075b2d48 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, n_channels):
super().__init__()
self.conv = nn.Conv2d(n_channels, n_channels, 3, 1, 1)
def forward(self, x):
x = torch.nn.functional.interpolate(x, scale_factor=4, mode=
'bilinear', align_corners=False)
x = torch.relu(self.conv(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MixPad2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/yu/cyu6oyysdpuqkq5k34tbyb2prxsfirwvz7teg6euxroe2cpoacjb.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.copy]
# Source node to ATen node mapping:
# x => copy
# Graph fragment:
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_3, %slice_4), kwargs = {})
# %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor, %copy, 2, 1, 5), kwargs = {})
# %slice_scatter_default_1 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%empty, %slice_scatter_default, 3, 0, 4), kwargs = {})
# %slice_scatter_default_2 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_11, 2, 0, 1), kwargs = {})
# %slice_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_2, %slice_16, 2, 5, 6), kwargs = {})
triton_poi_fused_copy_0 = async_compile.triton('triton_poi_fused_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_copy_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 6
x2 = (xindex // 24)
x3 = xindex % 24
x4 = xindex
tmp38 = tl.load(in_ptr1 + (x4), xmask)
tmp0 = x1
tmp1 = tl.full([1], 5, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = (-4) + x1
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp5 & tmp2
tmp7 = tmp0 >= tmp4
tmp8 = tmp0 < tmp1
tmp9 = tmp7 & tmp8
tmp10 = tmp9 & tmp6
tmp11 = tl.load(in_ptr0 + ((-4) + x3 + (16*x2)), tmp10 & xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + (x4), tmp6 & xmask, other=0.0)
tmp13 = tl.where(tmp9, tmp11, tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tmp3 >= tmp4
tmp17 = tmp3 < tmp1
tmp18 = tmp16 & tmp17
tmp19 = tmp18 & tmp2
tmp20 = tl.load(in_ptr0 + ((-20) + x3 + (16*x2)), tmp19 & xmask, other=0.0)
tmp21 = tl.load(in_ptr1 + ((-16) + x4), tmp2 & xmask, other=0.0)
tmp22 = tl.where(tmp18, tmp20, tmp21)
tmp23 = tl.where(tmp5, tmp15, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp2, tmp23, tmp24)
tmp26 = tmp0 < tmp4
tmp27 = 4 + x1
tmp28 = tmp27 >= tmp4
tmp29 = tmp27 < tmp1
tmp30 = tmp28 & tmp29
tmp31 = tmp30 & tmp26
tmp32 = tl.load(in_ptr0 + (12 + x3 + (16*x2)), tmp31 & xmask, other=0.0)
tmp33 = tl.load(in_ptr1 + (16 + x4), tmp26 & xmask, other=0.0)
tmp34 = tl.where(tmp30, tmp32, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp26, tmp34, tmp35)
tmp37 = tl.load(in_ptr0 + ((-4) + x3 + (16*x2)), tmp9 & xmask, other=0.0)
tmp39 = tl.where(tmp9, tmp37, tmp38)
tmp40 = tl.where(tmp26, tmp36, tmp39)
tmp41 = tl.where(tmp2, tmp25, tmp40)
tl.store(out_ptr0 + (x4), tmp41, xmask)
''', device_str='cuda')
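# This kernel materializes the circular pad along H: for a height-4 input
# padded by 1 on each side, output row 0 copies input row 3, rows 1-4 copy
# rows 0-3, and row 5 copies input row 0, matching F.pad(..., mode='circular').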
# kernel path: runs/run_shard_7/inductor_cache/uz/cuzrirpm2rdhvffydfmkfje2kmzy3ztj3vjhhhy5koqgexvvo35y.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.replication_pad2d]
# Source node to ATen node mapping:
# x_1 => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%slice_scatter_default_3, [None, None, %clamp_max, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %clamp_max_1]), kwargs = {})
triton_poi_fused_replication_pad2d_1 = async_compile.triton('triton_poi_fused_replication_pad2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_replication_pad2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_replication_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*((5) * ((5) <= (x1)) + (x1) * ((x1) < (5)))) + (24*x2) + ((3) * ((3) <= (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0))))) + (((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) * ((((0) * ((0) >= ((-1) + x0)) + ((-1) + x0) * (((-1) + x0) > (0)))) < (3)))), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
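# Replication padding along W is done purely with clamped index arithmetic:
# the nested min/max expressions pin x0 - 1 into [0, 3], so the border columns
# are repeated without any branching in the kernel body.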
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
buf0 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32)
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.copy]
stream0 = get_raw_stream(0)
triton_poi_fused_copy_0.run(arg0_1, buf0, buf1, 384, grid=grid(384), stream=stream0)
del arg0_1
del buf0
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.replication_pad2d]
triton_poi_fused_replication_pad2d_1.run(buf1, buf2, 576, grid=grid(576), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from itertools import product as product
import torch.nn as nn
class MixPad2d(nn.Module):
"""Mixed padding modes for H and W dimensions
Args:
        padding (tuple): the size of the padding for x and y, i.e. (pad_x, pad_y)
modes (tuple): the padding modes for x and y, the values of each can be
``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``.
Default: ``['replicate', 'circular']``
"""
__constants__ = ['modes', 'padding']
def __init__(self, padding=[1, 1], modes=['replicate', 'circular']):
super(MixPad2d, self).__init__()
assert len(padding) == 2
self.padding = padding
self.modes = modes
def forward(self, x):
x = nn.functional.pad(x, (0, 0, self.padding[1], self.padding[1]),
self.modes[1])
x = nn.functional.pad(x, (self.padding[0], self.padding[0], 0, 0),
self.modes[0])
return x
def extra_repr(self):
repr_ = (
'Mixed Padding: \t x axis: mode: {}, padding: {},\n\t y axis mode: {}, padding: {}'
.format(self.modes[0], self.padding[0], self.modes[1], self.
padding[1]))
return repr_
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
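# Hedged usage sketch (illustrative, not from the original source): with the
# default padding=[1, 1], MixPad2d pads H circularly and then W by
# replication, turning a (4, 4, 4, 4) input into a (4, 4, 6, 6) output.
def _mixpad2d_example():
    x = torch.rand(4, 4, 4, 4)
    y = MixPad2d()(x)
    assert y.shape == (4, 4, 6, 6)
    # Circular padding wraps rows: the new top row equals the old bottom row.
    assert torch.equal(y[:, :, 0, 1:-1], x[:, :, -1, :])
    return y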
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import product as product
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_copy_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 6
x2 = xindex // 24
x3 = xindex % 24
x4 = xindex
tmp38 = tl.load(in_ptr1 + x4, xmask)
tmp0 = x1
tmp1 = tl.full([1], 5, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = -4 + x1
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp5 & tmp2
tmp7 = tmp0 >= tmp4
tmp8 = tmp0 < tmp1
tmp9 = tmp7 & tmp8
tmp10 = tmp9 & tmp6
tmp11 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp10 & xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + x4, tmp6 & xmask, other=0.0)
tmp13 = tl.where(tmp9, tmp11, tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tmp3 >= tmp4
tmp17 = tmp3 < tmp1
tmp18 = tmp16 & tmp17
tmp19 = tmp18 & tmp2
tmp20 = tl.load(in_ptr0 + (-20 + x3 + 16 * x2), tmp19 & xmask, other=0.0)
tmp21 = tl.load(in_ptr1 + (-16 + x4), tmp2 & xmask, other=0.0)
tmp22 = tl.where(tmp18, tmp20, tmp21)
tmp23 = tl.where(tmp5, tmp15, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp2, tmp23, tmp24)
tmp26 = tmp0 < tmp4
tmp27 = 4 + x1
tmp28 = tmp27 >= tmp4
tmp29 = tmp27 < tmp1
tmp30 = tmp28 & tmp29
tmp31 = tmp30 & tmp26
tmp32 = tl.load(in_ptr0 + (12 + x3 + 16 * x2), tmp31 & xmask, other=0.0)
tmp33 = tl.load(in_ptr1 + (16 + x4), tmp26 & xmask, other=0.0)
tmp34 = tl.where(tmp30, tmp32, tmp33)
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp26, tmp34, tmp35)
tmp37 = tl.load(in_ptr0 + (-4 + x3 + 16 * x2), tmp9 & xmask, other=0.0)
tmp39 = tl.where(tmp9, tmp37, tmp38)
tmp40 = tl.where(tmp26, tmp36, tmp39)
tmp41 = tl.where(tmp2, tmp25, tmp40)
tl.store(out_ptr0 + x4, tmp41, xmask)
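# Illustrative eager reference (an assumption, not part of the generated
# output): the copy kernel above materializes the circular pad of the height
# dimension, matching torch.nn.functional.pad(x, (0, 0, 1, 1), mode='circular').
def _circular_pad_reference(x):
    # Pad H by one row on each side, wrapping values around circularly.
    return torch.nn.functional.pad(x, (0, 0, 1, 1), mode='circular')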
@triton.jit
def triton_poi_fused_replication_pad2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (5 * (5 <= x1) + x1 * (x1 < 5)) + 24 * x2 +
(3 * (3 <= 0 * (0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) + (0 * (
0 >= -1 + x0) + (-1 + x0) * (-1 + x0 > 0)) * (0 * (0 >= -1 + x0) +
(-1 + x0) * (-1 + x0 > 0) < 3))), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
buf0 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32)
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 6, 4), (96, 24, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_copy_0[grid(384)](arg0_1, buf0, buf1, 384, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
del buf0
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_replication_pad2d_1[grid(576)](buf1, buf2, 576,
XBLOCK=256, num_warps=4, num_stages=1)
del buf1
return buf2,
class MixPad2dNew(nn.Module):
"""Mixed padding modes for H and W dimensions
Args:
        padding (tuple): the size of the padding for x and y, i.e. (pad_x, pad_y)
modes (tuple): the padding modes for x and y, the values of each can be
``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``.
Default: ``['replicate', 'circular']``
"""
__constants__ = ['modes', 'padding']
def __init__(self, padding=[1, 1], modes=['replicate', 'circular']):
super(MixPad2dNew, self).__init__()
assert len(padding) == 2
self.padding = padding
self.modes = modes
def extra_repr(self):
repr_ = (
'Mixed Padding: \t x axis: mode: {}, padding: {},\n\t y axis mode: {}, padding: {}'
.format(self.modes[0], self.padding[0], self.modes[1], self.
padding[1]))
return repr_
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| xqyzjl/face_parsing | MixPad2d | false | 4,596 | [
"MIT"
] | 0 | 3d6c7b06d67c8fbf01bce22db199bc94a13a1a7c | https://github.com/xqyzjl/face_parsing/tree/3d6c7b06d67c8fbf01bce22db199bc94a13a1a7c | import torch
from itertools import product as product
import torch.nn as nn
class Model(nn.Module):
"""Mixed padding modes for H and W dimensions
Args:
        padding (tuple): the size of the padding for x and y, i.e. (pad_x, pad_y)
modes (tuple): the padding modes for x and y, the values of each can be
``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``.
Default: ``['replicate', 'circular']``
"""
__constants__ = ['modes', 'padding']
def __init__(self, padding=[1, 1], modes=['replicate', 'circular']):
super().__init__()
assert len(padding) == 2
self.padding = padding
self.modes = modes
def forward(self, x):
x = nn.functional.pad(x, (0, 0, self.padding[1], self.padding[1]),
self.modes[1])
x = nn.functional.pad(x, (self.padding[0], self.padding[0], 0, 0),
self.modes[0])
return x
def extra_repr(self):
repr_ = (
'Mixed Padding: \t x axis: mode: {}, padding: {},\n\t y axis mode: {}, padding: {}'
.format(self.modes[0], self.padding[0], self.modes[1], self.
padding[1]))
return repr_
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
GraphAttentionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fd/cfdzxywc6gfatrb2cciasco4amlaqiff3eq75s6456ncgemwqr2f.py
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
# Source node to ATen node mapping:
# gt => gt
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_4, 0), kwargs = {})
triton_poi_fused_gt_0 = async_compile.triton('triton_poi_fused_gt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qj/cqj2yzzoizkpi7j34ba4c5hjk7zwldtz2asgnlpps6ynwyjfj7av.py
# Topologically Sorted Source Nodes: [zero_vec, _attention, attention], Original ATen: [aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# _attention => where
# attention => amax, exp, sub, sum_1
# zero_vec => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_6, %full_default), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_mul_where_1 = async_compile.triton('triton_poi_fused__softmax_mul_where_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_where_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_where_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = -8999999815811072.0
tmp5 = tl.where(tmp0, tmp3, tmp4)
tmp9 = tmp7 + tmp8
tmp10 = tl.where(tmp6, tmp9, tmp4)
tmp11 = triton_helpers.maximum(tmp5, tmp10)
tmp15 = tmp13 + tmp14
tmp16 = tl.where(tmp12, tmp15, tmp4)
tmp17 = triton_helpers.maximum(tmp11, tmp16)
tmp21 = tmp19 + tmp20
tmp22 = tl.where(tmp18, tmp21, tmp4)
tmp23 = triton_helpers.maximum(tmp17, tmp22)
tmp24 = tmp5 - tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp10 - tmp23
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp16 - tmp23
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tmp22 - tmp23
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tl.store(out_ptr0 + (x0), tmp23, xmask)
tl.store(out_ptr1 + (x0), tmp34, xmask)
''', device_str='cuda')
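# Illustrative eager reference (an assumption, not part of the generated
# output): this kernel and the normalization kernel that follows together
# compute a numerically stable masked softmax over the last dimension,
# i.e. softmax(where(mask, logits, -8999999815811072.0), dim=-1).
def _masked_softmax_reference(logits, mask):
    filled = torch.where(mask, logits, torch.full_like(logits, -8999999815811072.0))
    shifted = filled - filled.amax(dim=-1, keepdim=True)  # subtract the row max
    exp = shifted.exp()
    return exp / exp.sum(dim=-1, keepdim=True)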
# kernel path: runs/run_shard_7/inductor_cache/ah/cah2hs3rlo3lnpwmpsbts72lgns4kmf25uryjatmojnyggb2i7pl.py
# Topologically Sorted Source Nodes: [zero_vec, _attention, attention], Original ATen: [aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# _attention => where
# attention => div, exp, sub
# zero_vec => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_6, %full_default), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_mul_where_2 = async_compile.triton('triton_poi_fused__softmax_mul_where_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_where_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_where_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = -8999999815811072.0
tmp5 = tl.where(tmp0, tmp3, tmp4)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sz/cszljhyxbwdpeaoht5rks2td5aannynp6sjwinyp7s3qrxmtq4bj.py
# Topologically Sorted Source Nodes: [x_new], Original ATen: [aten.elu]
# Source node to ATen node mapping:
# x_new => expm1, gt_1, mul_1, mul_3, where_1
# Graph fragment:
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_10, 0), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_10, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_1, %mul_3), kwargs = {})
triton_poi_fused_elu_3 = async_compile.triton('triton_poi_fused_elu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
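# Illustrative eager reference (an assumption, not part of the generated
# output): the kernel above is elementwise ELU with alpha = 1.0, i.e.
# x when x > 0 and expm1(x) otherwise.
def _elu_reference(x):
    return torch.where(x > 0, x, torch.expm1(x))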
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [xW], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
stream0 = get_raw_stream(0)
triton_poi_fused_gt_0.run(primals_4, buf2, 256, grid=grid(256), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [zero_vec, _attention, attention], Original ATen: [aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_mul_where_1.run(buf2, buf1, primals_3, buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [zero_vec, _attention, attention], Original ATen: [aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_mul_where_2.run(buf5, buf2, primals_3, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
del buf4
del primals_3
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_new], Original ATen: [aten.elu]
triton_poi_fused_elu_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
return (buf7, buf0, buf2, buf5, buf6, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def module_test_print(var_input, var_inmed, var_ouput):
    # Print each captured variable group (inputs, intermediates, outputs).
    for var in (var_input, var_inmed, var_ouput):
        print('-' * 40)
        for key, value in var.items():
            print(key, value)
        print()
class GraphAttentionLayer(nn.Module):
def __init__(self, dim_input, dim_output, dropout=0.0,
negative_slope_LeakyRelu=0.01, module_test=False):
super(GraphAttentionLayer, self).__init__()
self.dim_input = dim_input
self.dim_output = dim_output
self.dropout = dropout
self.W = nn.Parameter(torch.empty(size=(dim_input, dim_output)))
nn.init.xavier_uniform_(self.W.data, gain=nn.init.calculate_gain(
'leaky_relu', negative_slope_LeakyRelu))
self.leakyrelu = nn.LeakyReLU(negative_slope_LeakyRelu)
self.module_test = module_test
def _attention_score(self, x, y=None):
return torch.matmul(x, x.transpose(-2, -1))
def forward(self, x, adj, A):
"""
input:
x : (batch_size, n_node, dim_node)
adj : (batch_size, n_node, n_node)
"""
xW = torch.matmul(x, self.W)
score = self._attention_score(xW)
score += A
zero_vec = -9000000000000000.0 * torch.ones_like(score)
_attention = torch.where(adj > 0, score, zero_vec)
attention = F.softmax(_attention, dim=-1)
attention = F.dropout(attention, self.dropout, training=self.training)
x_new = F.elu(torch.matmul(attention, xW))
if self.module_test:
var_input = ['x', 'adj']
var_inmed = ['xW', 'score', 'zero_vec', '_attention', 'attention']
var_ouput = ['x_new']
locals_cap = locals()
module_test_print(dict(zip(var_input, [eval(v, locals_cap) for
v in var_input])), dict(zip(var_inmed, [eval(v, locals_cap) for
v in var_inmed])), dict(zip(var_ouput, [eval(v, locals_cap) for
v in var_ouput])))
return x_new
def __repr__(self):
return '{}({}->{})'.format(self.__class__.__name__, self.dim_input,
self.dim_output)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_input': 4, 'dim_output': 4}]
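# Hedged usage sketch (illustrative, not from the original source): run the
# layer on the random test inputs and check that the output shape is preserved.
def _graph_attention_example():
    layer = GraphAttentionLayer(dim_input=4, dim_output=4)
    x, adj, A = get_inputs()
    out = layer(x, adj, A)
    assert out.shape == (4, 4, 4, 4)
    return out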
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_where_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp1 + tmp2
tmp4 = -8999999815811072.0
tmp5 = tl.where(tmp0, tmp3, tmp4)
tmp9 = tmp7 + tmp8
tmp10 = tl.where(tmp6, tmp9, tmp4)
tmp11 = triton_helpers.maximum(tmp5, tmp10)
tmp15 = tmp13 + tmp14
tmp16 = tl.where(tmp12, tmp15, tmp4)
tmp17 = triton_helpers.maximum(tmp11, tmp16)
tmp21 = tmp19 + tmp20
tmp22 = tl.where(tmp18, tmp21, tmp4)
tmp23 = triton_helpers.maximum(tmp17, tmp22)
tmp24 = tmp5 - tmp23
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp10 - tmp23
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp16 - tmp23
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tmp32 = tmp22 - tmp23
tmp33 = tl_math.exp(tmp32)
tmp34 = tmp31 + tmp33
tl.store(out_ptr0 + x0, tmp23, xmask)
tl.store(out_ptr1 + x0, tmp34, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_where_2(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = -8999999815811072.0
tmp5 = tl.where(tmp0, tmp3, tmp4)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_elu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_gt_0[grid(256)](primals_4, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_mul_where_1[grid(64)](buf2, buf1,
primals_3, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_mul_where_2[grid(256)](buf5, buf2,
primals_3, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del buf4
del primals_3
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_elu_3[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf7, buf0, buf2, buf5, buf6, reinterpret_tensor(primals_2, (4,
64), (1, 4), 0)
def module_test_print(var_input, var_inmed, var_ouput):
    # Print each captured variable group (inputs, intermediates, outputs).
    for var in (var_input, var_inmed, var_ouput):
        print('-' * 40)
        for key, value in var.items():
            print(key, value)
        print()
class GraphAttentionLayerNew(nn.Module):
def __init__(self, dim_input, dim_output, dropout=0.0,
negative_slope_LeakyRelu=0.01, module_test=False):
super(GraphAttentionLayerNew, self).__init__()
self.dim_input = dim_input
self.dim_output = dim_output
self.dropout = dropout
self.W = nn.Parameter(torch.empty(size=(dim_input, dim_output)))
nn.init.xavier_uniform_(self.W.data, gain=nn.init.calculate_gain(
'leaky_relu', negative_slope_LeakyRelu))
self.leakyrelu = nn.LeakyReLU(negative_slope_LeakyRelu)
self.module_test = module_test
def _attention_score(self, x, y=None):
return torch.matmul(x, x.transpose(-2, -1))
def __repr__(self):
return '{}({}->{})'.format(self.__class__.__name__, self.dim_input,
self.dim_output)
def forward(self, input_0, input_1, input_2):
primals_1 = self.W
primals_2 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| xlx0010/HGNN | GraphAttentionLayer | false | 4,597 | [
"MIT"
] | 0 | 219352405db021c1f435f3aa55961adcf2a6df19 | https://github.com/xlx0010/HGNN/tree/219352405db021c1f435f3aa55961adcf2a6df19 | import torch
import torch.nn as nn
import torch.nn.functional as F
def module_test_print(var_input, var_inmed, var_ouput):
    # Print each captured variable group (inputs, intermediates, outputs).
    for var in (var_input, var_inmed, var_ouput):
        print('-' * 40)
        for key, value in var.items():
            print(key, value)
        print()
class Model(nn.Module):
def __init__(self, dim_input, dim_output, dropout=0.0,
negative_slope_LeakyRelu=0.01, module_test=False):
super().__init__()
self.dim_input = dim_input
self.dim_output = dim_output
self.dropout = dropout
self.W = nn.Parameter(torch.empty(size=(dim_input, dim_output)))
nn.init.xavier_uniform_(self.W.data, gain=nn.init.calculate_gain(
'leaky_relu', negative_slope_LeakyRelu))
self.leakyrelu = nn.LeakyReLU(negative_slope_LeakyRelu)
self.module_test = module_test
def _attention_score(self, x, y=None):
return torch.matmul(x, x.transpose(-2, -1))
def forward(self, x, adj, A):
"""
input:
x : (batch_size, n_node, dim_node)
adj : (batch_size, n_node, n_node)
"""
xW = torch.matmul(x, self.W)
score = self._attention_score(xW)
score += A
zero_vec = -9000000000000000.0 * torch.ones_like(score)
_attention = torch.where(adj > 0, score, zero_vec)
attention = F.softmax(_attention, dim=-1)
attention = F.dropout(attention, self.dropout, training=self.training)
x_new = F.elu(torch.matmul(attention, xW))
if self.module_test:
var_input = ['x', 'adj']
var_inmed = ['xW', 'score', 'zero_vec', '_attention', 'attention']
var_ouput = ['x_new']
locals_cap = locals()
module_test_print(dict(zip(var_input, [eval(v, locals_cap) for
v in var_input])), dict(zip(var_inmed, [eval(v, locals_cap) for
v in var_inmed])), dict(zip(var_ouput, [eval(v, locals_cap) for
v in var_ouput])))
return x_new
def __repr__(self):
return '{}({}->{})'.format(self.__class__.__name__, self.dim_input,
self.dim_output)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ReturnAsLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jc/cjczc5as556suobthf34fqmn7mcwneofcng4c32v3vxmayhyhupg.py
# Topologically Sorted Source Nodes: [add, mul, sum_1, log, sum_2, neg], Original ATen: [aten.add, aten.mul, aten.sum, aten.log, aten.neg]
# Source node to ATen node mapping:
# add => add
# log => log
# mul => mul
# neg => neg
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %add), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%log,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
triton_per_fused_add_log_mul_neg_sum_0 = async_compile.triton('triton_per_fused_add_log_mul_neg_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_log_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp6 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp10 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp11 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp15 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp16 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp0 * tmp3
tmp7 = tmp6 + tmp2
tmp8 = tmp5 * tmp7
tmp9 = tmp4 + tmp8
tmp12 = tmp11 + tmp2
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp16 + tmp2
tmp18 = tmp15 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = tl_math.log(tmp19)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.sum(tmp21, 1)[:, None]
tmp24 = -tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp24, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, mul, sum_1, log, sum_2, neg], Original ATen: [aten.add, aten.mul, aten.sum, aten.log, aten.neg]
stream0 = get_raw_stream(0)
triton_per_fused_add_log_mul_neg_sum_0.run(buf1, arg1_1, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ReturnAsLoss(nn.Module):
def __init__(self):
super(ReturnAsLoss, self).__init__()
def forward(self, output, y):
"""negative logarithm return"""
return -torch.sum(torch.log(torch.sum(output * (y + 1), dim=1)))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
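# Worked sketch (illustrative, with an assumed portfolio reading of the
# inputs): `output` holds asset weights and `y` per-asset returns, so
# sum(output * (y + 1), dim=1) is a per-step growth factor and the loss is
# the negative log of the product of growth factors.
def _return_as_loss_example():
    loss_fn = ReturnAsLoss()
    output = torch.full((2, 4), 0.25)  # equal weights over four assets
    y = torch.full((2, 4), 0.10)       # every asset gains 10%
    loss = loss_fn(output, y)
    # Each step grows by 1.1, so loss = -2 * log(1.1) ~= -0.1906.
    assert torch.isclose(loss, torch.tensor(-0.1906), atol=1e-3)
    return loss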
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp6 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp10 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp11 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp15 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp0 * tmp3
tmp7 = tmp6 + tmp2
tmp8 = tmp5 * tmp7
tmp9 = tmp4 + tmp8
tmp12 = tmp11 + tmp2
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp16 + tmp2
tmp18 = tmp15 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = tl_math.log(tmp19)
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.sum(tmp21, 1)[:, None]
tmp24 = -tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log_mul_neg_sum_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class ReturnAsLossNew(nn.Module):
def __init__(self):
super(ReturnAsLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| yanxurui/portfolio | ReturnAsLoss | false | 4,598 | [
"MIT"
] | 0 | 032cf47ccac1c5815fd4827bf0d5f3cf43cec990 | https://github.com/yanxurui/portfolio/tree/032cf47ccac1c5815fd4827bf0d5f3cf43cec990 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, y):
"""negative logarithm return"""
return -torch.sum(torch.log(torch.sum(output * (y + 1), dim=1)))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SqueezeExcitation | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sh/cshbgrlhlsuuebcz7jbje66sr2nkeng6kilqpqluwrr5ru2afxle.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7g/c7gotp6sosnxx5bgxfizl4c5ol6y7iy3n4betrvaeug7kitjfzgr.py
# Topologically Sorted Source Nodes: [x_3, x_4, mul], Original ATen: [aten.convolution, aten.hardsigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# x_3 => convolution_1
# x_4 => add, clamp_max, clamp_min, div
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %div), kwargs = {})
triton_poi_fused_convolution_hardsigmoid_mul_2 = async_compile.triton('triton_poi_fused_convolution_hardsigmoid_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_hardsigmoid_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 16)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
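# Illustrative eager reference (an assumption, not part of the generated
# output): the kernel above fuses the bias add of the second 1x1 convolution
# with hardsigmoid and the channel-wise rescale of the input, i.e.
# x * clamp(s + 3, 0, 6) / 6 broadcast over the spatial dimensions.
def _hardsigmoid_scale_reference(x, s):
    return x * (s + 3.0).clamp(0.0, 6.0) / 6.0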
# kernel path: runs/run_shard_7/inductor_cache/hv/chvrs33e2wyvaktcs4kouk236tzwn7sho4sgx2yjf7sjc72okw6d.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution, aten.hardsigmoid_backward]
# Source node to ATen node mapping:
# x_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, -3.0), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%convolution_1, 3.0), kwargs = {})
# %bitwise_and : [num_users=1] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%gt, %lt), kwargs = {})
triton_poi_fused_convolution_hardsigmoid_backward_3 = async_compile.triton('triton_poi_fused_convolution_hardsigmoid_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_hardsigmoid_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -3.0
tmp4 = tmp2 > tmp3
tmp5 = 3.0
tmp6 = tmp2 < tmp5
tmp7 = tmp4 & tmp6
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
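# Editor's note (hedged): this kernel saves the boolean support mask
# (-3 < conv + bias < 3) of hardsigmoid. Outside that interval the activation
# is flat, so autograd only needs this bool buffer (later scaled by 1/6) to
# form the backward gradient, which is why no float tensor is stored here.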
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (8, ), (1, ))
assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 32, grid=grid(32), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3, x_4, mul], Original ATen: [aten.convolution, aten.hardsigmoid, aten.mul]
triton_poi_fused_convolution_hardsigmoid_mul_2.run(primals_1, buf4, primals_5, buf5, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution, aten.hardsigmoid_backward]
triton_poi_fused_convolution_hardsigmoid_backward_3.run(buf4, primals_5, buf6, 16, grid=grid(16), stream=stream0)
del buf4
del primals_5
return (buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
def _make_divisible(width, divisor=8):
new_width = max(divisor, int(width + divisor / 2) // divisor * divisor)
if new_width < 0.9 * width:
new_width += divisor
return new_width
class SqueezeExcitation(torch.nn.Module):
""" [https://arxiv.org/abs/1709.01507] """
def __init__(self, c1):
super().__init__()
c2 = _make_divisible(c1 // 4)
self.pool = torch.nn.AdaptiveAvgPool2d(output_size=1)
self.conv1 = torch.nn.Conv2d(in_channels=c1, out_channels=c2,
kernel_size=1)
self.conv2 = torch.nn.Conv2d(in_channels=c2, out_channels=c1,
kernel_size=1)
self.relu = torch.nn.ReLU()
self.hard = torch.nn.Hardsigmoid()
def _scale(self, x):
x = self.pool(x)
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.hard(x)
return x
def forward(self, x):
return x * self._scale(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c1': 4}]
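# Usage sketch (editor's addition, not part of the original sample; names are
# illustrative). The block rescales each channel by a weight in [0, 1] and
# preserves the input shape:
def _demo_squeeze_excitation():
    se = SqueezeExcitation(c1=4)
    x = torch.rand(4, 4, 4, 4)
    out = se(x)
    assert out.shape == x.shape  # per-channel gating keeps the spatial layout
    return out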
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -3.0
tmp4 = tmp2 > tmp3
tmp5 = 3.0
tmp6 = tmp2 < tmp5
tmp7 = tmp4 & tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(32)](buf3, primals_3, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_hardsigmoid_mul_2[grid(256)](primals_1,
buf4, primals_5, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_hardsigmoid_backward_3[grid(16)](buf4,
primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6
def _make_divisible(width, divisor=8):
new_width = max(divisor, int(width + divisor / 2) // divisor * divisor)
if new_width < 0.9 * width:
new_width += divisor
return new_width
class SqueezeExcitationNew(torch.nn.Module):
""" [https://arxiv.org/abs/1709.01507] """
def __init__(self, c1):
super().__init__()
c2 = _make_divisible(c1 // 4)
self.pool = torch.nn.AdaptiveAvgPool2d(output_size=1)
self.conv1 = torch.nn.Conv2d(in_channels=c1, out_channels=c2,
kernel_size=1)
self.conv2 = torch.nn.Conv2d(in_channels=c2, out_channels=c1,
kernel_size=1)
self.relu = torch.nn.ReLU()
self.hard = torch.nn.Hardsigmoid()
def _scale(self, x):
x = self.pool(x)
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.hard(x)
return x
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| yakhyo/MobileNetV3-pt | SqueezeExcitation | false | 4599 | ["MIT"] | 0 | 1fbc966036ed9f036090b3efe3e700f057aa7dde | https://github.com/yakhyo/MobileNetV3-pt/tree/1fbc966036ed9f036090b3efe3e700f057aa7dde | import torch
import torch.utils.data
def _make_divisible(width, divisor=8):
new_width = max(divisor, int(width + divisor / 2) // divisor * divisor)
if new_width < 0.9 * width:
new_width += divisor
return new_width
class Model(torch.nn.Module):
""" [https://arxiv.org/abs/1709.01507] """
def __init__(self, c1):
super().__init__()
c2 = _make_divisible(c1 // 4)
self.pool = torch.nn.AdaptiveAvgPool2d(output_size=1)
self.conv1 = torch.nn.Conv2d(in_channels=c1, out_channels=c2,
kernel_size=1)
self.conv2 = torch.nn.Conv2d(in_channels=c2, out_channels=c1,
kernel_size=1)
self.relu = torch.nn.ReLU()
self.hard = torch.nn.Hardsigmoid()
def _scale(self, x):
x = self.pool(x)
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.hard(x)
return x
def forward(self, x):
return x * self._scale(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Binary | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/25/c25abmnfvpbminquuuccx3uuxiwwey6apnv4db7kdbjdmozwrbct.py
# Topologically Sorted Source Nodes: [setitem, setitem_1, binary_cross_entropy], Original ATen: [aten.lift_fresh, aten.index_put, aten.binary_cross_entropy]
# Source node to ATen node mapping:
# binary_cross_entropy => full_default_2, full_default_3, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1
# setitem => full_default, index_put
# setitem_1 => full_default_1, index_put_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.8999999761581421), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put.default](args = (%arg0_1, [%gt], %full_default), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_1 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put, [%lt], %full_default_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%index_put_1, 1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default_2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_put_1, %maximum_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {})
triton_per_fused_binary_cross_entropy_index_put_lift_fresh_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_index_put_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_index_put_lift_fresh_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_index_put_lift_fresh_0(in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp9 = tl.load(in_ptr1 + (r0), None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.8999999761581421
tmp4 = tl.where(tmp2, tmp3, tmp0)
tmp5 = tmp0 < tmp1
tmp6 = tl.where(tmp5, tmp1, tmp4)
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp10 = -tmp9
tmp11 = libdevice.log1p(tmp10)
tmp12 = -100.0
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp8 * tmp13
tmp15 = tl_math.log(tmp9)
tmp16 = triton_helpers.maximum(tmp15, tmp12)
tmp17 = tmp6 * tmp16
tmp18 = tmp14 - tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
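# Editor's note (hedged): tmp2..tmp6 fold the two index_put_ writes into
# selects -- targets > 0 become 0.8999999761581421 (float32 0.9), targets < 0
# become 0.0 -- and tmp10..tmp18 are nn.BCELoss with both log terms clamped at
# -100 (PyTorch's guard against log(0)):
#     loss = mean((t - 1) * max(log1p(-p), -100) - t * max(log(p), -100))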
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [setitem, setitem_1, binary_cross_entropy], Original ATen: [aten.lift_fresh, aten.index_put, aten.binary_cross_entropy]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_index_put_lift_fresh_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Binary(nn.Module):
def __init__(self):
super().__init__()
self._criteria = nn.BCELoss()
def forward(self, output, y):
y_copy = y.clone()
y_copy[y > 0] = 0.9
y_copy[y < 0] = 0
return self._criteria(output, y_copy)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
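# Equivalence sketch (editor's addition; names are illustrative). The fused
# kernel reproduces this eager computation, with the in-place target rewrites
# expressed as selects:
def _reference_binary_loss(output, y):
    target = torch.where(y > 0, torch.full_like(y, 0.9), y)
    target = torch.where(y < 0, torch.zeros_like(y), target)
    return torch.nn.functional.binary_cross_entropy(output, target)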
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_index_put_lift_fresh_0(in_out_ptr1,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.8999999761581421
tmp4 = tl.where(tmp2, tmp3, tmp0)
tmp5 = tmp0 < tmp1
tmp6 = tl.where(tmp5, tmp1, tmp4)
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp10 = -tmp9
tmp11 = libdevice.log1p(tmp10)
tmp12 = -100.0
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = tmp8 * tmp13
tmp15 = tl_math.log(tmp9)
tmp16 = triton_helpers.maximum(tmp15, tmp12)
tmp17 = tmp6 * tmp16
tmp18 = tmp14 - tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_index_put_lift_fresh_0[grid(1)](
buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class BinaryNew(nn.Module):
def __init__(self):
super().__init__()
self._criteria = nn.BCELoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| yanxurui/portfolio | Binary | false | 4600 | ["MIT"] | 0 | 032cf47ccac1c5815fd4827bf0d5f3cf43cec990 | https://github.com/yanxurui/portfolio/tree/032cf47ccac1c5815fd4827bf0d5f3cf43cec990 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self._criteria = nn.BCELoss()
def forward(self, output, y):
y_copy = y.clone()
y_copy[y > 0] = 0.9
y_copy[y < 0] = 0
return self._criteria(output, y_copy)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CustomizedLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xx/cxxo2iuieuo7bzosoql3rtwp5kw2uu67gwgjmm7sgrhljjww5qs3.py
# Topologically Sorted Source Nodes: [mul, sum_1, mean, neg], Original ATen: [aten.mul, aten.sum, aten.mean, aten.neg]
# Source node to ATen node mapping:
# mean => mean
# mul => mul
# neg => neg
# sum_1 => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
triton_per_fused_mean_mul_neg_sum_0 = async_compile.triton('triton_per_fused_mean_mul_neg_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp7 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp11 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp12 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.sum(tmp15, 1)[:, None]
tmp18 = 64.0
tmp19 = tmp17 / tmp18
tmp20 = -tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
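# Editor's note (hedged): the sum over dim=1 (4 channels at stride 16) is
# unrolled into the four strided load pairs above; the remaining 64
# (batch, H, W) positions are reduced in one pass, divided by 64.0 for the
# mean, and negated (tmp20), so -mean(sum(output * y, dim=1)) runs as a
# single kernel.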
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, sum_1, mean, neg], Original ATen: [aten.mul, aten.sum, aten.mean, aten.neg]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mul_neg_sum_0.run(buf1, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CustomizedLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, y):
return -torch.mean(torch.sum(output * y, dim=1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
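# Equivalence sketch (editor's addition; names are illustrative). In eager
# terms the single fused reduction above computes:
def _reference_customized_loss(output, y):
    return -(output * y).sum(dim=1).mean()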
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp7 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp11 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp12 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.sum(tmp15, 1)[:, None]
tmp18 = 64.0
tmp19 = tmp17 / tmp18
tmp20 = -tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_neg_sum_0[grid(1)](buf1, arg0_1, arg1_1,
1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class CustomizedLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| yanxurui/portfolio | CustomizedLoss | false | 4601 | ["MIT"] | 0 | 032cf47ccac1c5815fd4827bf0d5f3cf43cec990 | https://github.com/yanxurui/portfolio/tree/032cf47ccac1c5815fd4827bf0d5f3cf43cec990 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, y):
return -torch.mean(torch.sum(output * y, dim=1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|