Dataset columns, with the value ranges reported by the dataset viewer (string and sequence columns are summarized by their length range):

| Column | Type | Min | Max |
|---|---|---|---|
| `entry_point` | string (length) | 1 | 65 |
| `original_triton_code` | string (length) | 4.5k | 619k |
| `python_code` | string (length) | 208 | 60.9k |
| `triton_code` | string (length) | 1.15k | 275k |
| `repo_name` | string (length) | 7 | 115 |
| `module_name` | string (length) | 1 | 65 |
| `synthetic` | bool (1 class) | — | — |
| `uuid` | int64 | 0 | 18.5k |
| `licenses` | sequence (length) | 1 | 6 |
| `stars` | int64 | 0 | 19.8k |
| `sha` | string (length) | 40 | 40 |
| `repo_link` | string (length) | 72 | 180 |
| `pytorch_code` | string (length) | 200 | 4.05k |
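As a sketch of how these columns might be consumed, the snippet below loads the dataset with the Hugging Face `datasets` library and inspects one record. The repository path `"org/dataset-name"` is a placeholder, since the card above does not name it.

```python
# Hypothetical usage sketch: the dataset path is a placeholder,
# since this card does not name the repository.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")  # placeholder path
row = ds[0]
print(row["entry_point"])           # e.g. "CBDNet"
print(row["stars"], row["sha"])     # repository metadata columns
print(row["pytorch_code"][:200])    # the original PyTorch module source
```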
**Example record: `CBDNet`**

The record's generated Triton code (TorchInductor AOT output) begins as follows:
```python
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_6/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py
# Topologically Sorted Source Nodes: [x, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   relu => relu
#   x => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 4096) % 32
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/u2/cu2nx5gatbi2wnsckd5vputtg7asjzcjr7nokvs6in2jknlo3sxf.py
# Topologically Sorted Source Nodes: [x_4, relu_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   relu_4 => relu_4
#   x_4 => convolution_4
# Graph fragment:
#   %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_4 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
#   %le_19 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 49152
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 4096) % 3
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x3), tmp4, None)
    tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/l2/cl2yrhjfsu3t5e65ggkug6mp3hys7hme6rjc4jfgdtxqomziurrj.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   x_5 => cat
# Graph fragment:
#   %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %relu_4], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 98304
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x1 = (xindex // 4096) % 6
    x0 = xindex % 4096
    x2 = (xindex // 24576)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 3, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (12288*x2)), tmp4, other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 6, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-3) + x1)) + (12288*x2)), tmp6, other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/m3/cm3wzz4v2zy2xot3ql5aqbqjdmnjr6s3qrqg34iqvukjyasoya2d.py
# Topologically Sorted Source Nodes: [x_6, relu_5], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   relu_5 => relu_5
#   x_6 => convolution_5
# Graph fragment:
#   %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 4096) % 64
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/3k/c3ky42e7z7kc7crxooe2zxauw552hz5v6luk6eipnw6f6mv3rbjv.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   x_10 => convolution_9
# Graph fragment:
#   %convolution_9 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_8, %primals_20, %primals_21, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 1024) % 256
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/a7/ca7u5y7sgzp7rip5a7gwfwo4ibyzvxior4jhdl3kkdlgj35frdtq.py
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   x_11 => convolution_10
# Graph fragment:
#   %convolution_10 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_9, %primals_22, %primals_23, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 1024) % 128
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/ww/cwweet4qrbwyj2grqpcyv3yfivnlimg33ousukrlrkmur4malgmj.py
# Topologically Sorted Source Nodes: [x_12, relu_9], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   relu_9 => relu_9
#   x_12 => convolution_11
# Graph fragment:
#   %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 1024) % 128
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/dx/cdxjsf5gno4onjrurf3jffldds2jzgnvodvhah25crofyyyzo2uu.py
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   x_15 => convolution_14
# Graph fragment:
#   %convolution_14 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_11, %primals_30, %primals_31, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 256) % 512
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/ze/czeyptvjc3bzvvlms6nxxgdcw4ugmbmn262eg3pyxzl3g7ft2icr.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   x_16 => convolution_15
# Graph fragment:
#   %convolution_15 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_14, %primals_32, %primals_33, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_8 = async_compile.triton('triton_poi_fused_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 256) % 256
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/hn/chnctragayz5n3xkzod5u4bpw6iezzbvzv3cd2d3wgjog2fwaaer.py
# Topologically Sorted Source Nodes: [x_17, relu_12], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   relu_12 => relu_12
#   x_17 => convolution_16
# Graph fragment:
#   %convolution_16 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_15, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_12 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_16,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 256) % 256
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/wu/cwuaq2b3f6xldsm5mfsle3bdyuoxcejjsbdkihyhe3eazhtx22j3.py
# Topologically Sorted Source Nodes: [x_23, relu_18], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   relu_18 => relu_18
#   x_23 => convolution_22
# Graph fragment:
#   %convolution_22 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_17, %primals_46, %primals_47, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_18 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_22,), kwargs = {})
triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 256) % 512
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/jb/cjbbqrk6kces7joxm6loh62d5ljw3tw7kceh6r3rawtngj36y46a.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   x_25 => add
# Graph fragment:
#   %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_23, %relu_11), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), None)
    tmp1 = tl.load(in_ptr0 + (x0), None)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/3f/c3fhe7v4db2lx3rqy3r7pncx5vtfygo5ugpvsumaool5ba46bviu.py
# Topologically Sorted Source Nodes: [x_28, relu_21], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   relu_21 => relu_21
#   x_28 => convolution_26
# Graph fragment:
#   %convolution_26 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_20, %primals_53, %primals_54, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu_21 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_26,), kwargs = {})
triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 1024) % 256
    tmp0 = tl.load(in_out_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/55/c55ia3jdroplx57d7fah75dclmxd32mvrpbtdwff7nkcjk6qcktt.py
# Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   x_30 => add_1
# Graph fragment:
#   %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_27, %relu_8), kwargs = {})
triton_poi_fused_add_13 = async_compile.triton('triton_poi_fused_add_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), None)
    tmp1 = tl.load(in_ptr0 + (x0), None)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')


# kernel path: runs/run_shard_6/inductor_cache/bg/cbgmn5csl4fq4u2muag7w2zi5rjnzw4zyaycveufrmfuakwdpqdu.py
# Topologically Sorted Source Nodes: [x_33, y], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
#   x_33 => convolution_30
#   y => add_2
# Graph fragment:
#   %convolution_30 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_23, %primals_60, %primals_61, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %convolution_30), kwargs = {})
triton_poi_fused_add_convolution_14 = async_compile.triton('triton_poi_fused_add_convolution_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_14(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 49152
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 4096) % 3
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_out_ptr0 + (x3), None)
    tmp2 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61 = args
    args.clear()
    assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (32, ), (1, ))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_5, (32, ), (1, ))
    assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_7, (32, ), (1, ))
    assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_9, (32, ), (1, ))
    assert_size_stride(primals_10, (3, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_11, (3, ), (1, ))
    assert_size_stride(primals_12, (64, 6, 3, 3), (54, 9, 3, 1))
    assert_size_stride(primals_13, (64, ), (1, ))
    assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_15, (64, ), (1, ))
    assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_17, (64, ), (1, ))
    assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_19, (64, ), (1, ))
    assert_size_stride(primals_20, (256, 64, 2, 2), (256, 4, 2, 1))
    assert_size_stride(primals_21, (256, ), (1, ))
    assert_size_stride(primals_22, (128, 256, 1, 1), (256, 1, 1, 1))
    assert_size_stride(primals_23, (128, ), (1, ))
    assert_size_stride(primals_24, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_25, (128, ), (1, ))
    assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_27, (128, ), (1, ))
    assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_29, (128, ), (1, ))
    assert_size_stride(primals_30, (512, 128, 2, 2), (512, 4, 2, 1))
    assert_size_stride(primals_31, (512, ), (1, ))
    assert_size_stride(primals_32, (256, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_33, (256, ), (1, ))
    assert_size_stride(primals_34, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_35, (256, ), (1, ))
    assert_size_stride(primals_36, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_37, (256, ), (1, ))
    assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_39, (256, ), (1, ))
    assert_size_stride(primals_40, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_41, (256, ), (1, ))
    assert_size_stride(primals_42, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_43, (256, ), (1, ))
    assert_size_stride(primals_44, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_45, (256, ), (1, ))
    assert_size_stride(primals_46, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_47, (512, ), (1, ))
    assert_size_stride(primals_48, (512, 128, 2, 2), (512, 4, 2, 1))
    assert_size_stride(primals_49, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_50, (128, ), (1, ))
    assert_size_stride(primals_51, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_52, (128, ), (1, ))
    assert_size_stride(primals_53, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_54, (256, ), (1, ))
    assert_size_stride(primals_55, (256, 64, 2, 2), (256, 4, 2, 1))
    assert_size_stride(primals_56, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_57, (64, ), (1, ))
    assert_size_stride(primals_58, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_59, (64, ), (1, ))
    assert_size_stride(primals_60, (3, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_61, (3, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [x, relu], Original ATen: [aten.convolution, aten.relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
        del primals_2
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf3 = buf2; del buf2  # reuse
        # Topologically Sorted Source Nodes: [x_1, relu_1], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0)
        del primals_5
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf5 = buf4; del buf4  # reuse
        # Topologically Sorted Source Nodes: [x_2, relu_2], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_0.run(buf5, primals_7, 524288, grid=grid(524288), stream=stream0)
        del primals_7
        # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
        buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1))
        buf7 = buf6; del buf6  # reuse
        # Topologically Sorted Source Nodes: [x_3, relu_3], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_0.run(buf7, primals_9, 524288, grid=grid(524288), stream=stream0)
        del primals_9
        # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
        buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 3, 64, 64), (12288, 4096, 64, 1))
        buf9 = buf8; del buf8  # reuse
        buf63 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x_4, relu_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
        triton_poi_fused_convolution_relu_threshold_backward_1.run(buf9, primals_11, buf63, 49152, grid=grid(49152), stream=stream0)
        del primals_11
        buf10 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.cat]
        triton_poi_fused_cat_2.run(primals_3, buf9, buf10, 98304, grid=grid(98304), stream=stream0)
        # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
        buf11 = extern_kernels.convolution(buf10, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf12 = buf11; del buf11  # reuse
        # Topologically Sorted Source Nodes: [x_6, relu_5], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_3.run(buf12, primals_13, 1048576, grid=grid(1048576), stream=stream0)
        del primals_13
        # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
        buf13 = extern_kernels.convolution(buf12, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf14 = buf13; del buf13  # reuse
        # Topologically Sorted Source Nodes: [x_7, relu_6], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_3.run(buf14, primals_15, 1048576, grid=grid(1048576), stream=stream0)
        del primals_15
        # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
        buf15 = extern_kernels.convolution(buf14, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf15, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf16 = buf15; del buf15  # reuse
        # Topologically Sorted Source Nodes: [x_8, relu_7], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_3.run(buf16, primals_17, 1048576, grid=grid(1048576), stream=stream0)
        del primals_17
        # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
        buf17 = extern_kernels.convolution(buf16, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf18 = buf17; del buf17  # reuse
        # Topologically Sorted Source Nodes: [x_9, relu_8], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_3.run(buf18, primals_19, 1048576, grid=grid(1048576), stream=stream0)
        del primals_19
        # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
        buf19 = extern_kernels.convolution(buf18, primals_20, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf19, (4, 256, 32, 32), (262144, 1024, 32, 1))
        buf20 = buf19; del buf19  # reuse
        # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_4.run(buf20, primals_21, 1048576, grid=grid(1048576), stream=stream0)
        del primals_21
        # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
        buf21 = extern_kernels.convolution(buf20, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf21, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf22 = buf21; del buf21  # reuse
        # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_5.run(buf22, primals_23, 524288, grid=grid(524288), stream=stream0)
        del primals_23
        # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
        buf23 = extern_kernels.convolution(buf22, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf23, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf24 = buf23; del buf23  # reuse
        # Topologically Sorted Source Nodes: [x_12, relu_9], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_6.run(buf24, primals_25, 524288, grid=grid(524288), stream=stream0)
        del primals_25
        # Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution]
        buf25 = extern_kernels.convolution(buf24, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf25, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf26 = buf25; del buf25  # reuse
        # Topologically Sorted Source Nodes: [x_13, relu_10], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_6.run(buf26, primals_27, 524288, grid=grid(524288), stream=stream0)
        del primals_27
        # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
        buf27 = extern_kernels.convolution(buf26, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf27, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf28 = buf27; del buf27  # reuse
        # Topologically Sorted Source Nodes: [x_14, relu_11], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_6.run(buf28, primals_29, 524288, grid=grid(524288), stream=stream0)
        del primals_29
        # Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
        buf29 = extern_kernels.convolution(buf28, primals_30, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf29, (4, 512, 16, 16), (131072, 256, 16, 1))
        buf30 = buf29; del buf29  # reuse
        # Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_7.run(buf30, primals_31, 524288, grid=grid(524288), stream=stream0)
        del primals_31
        # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
        buf31 = extern_kernels.convolution(buf30, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf31, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf32 = buf31; del buf31  # reuse
        # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_8.run(buf32, primals_33, 262144, grid=grid(262144), stream=stream0)
        del primals_33
        # Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution]
        buf33 = extern_kernels.convolution(buf32, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf33, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf34 = buf33; del buf33  # reuse
        # Topologically Sorted Source Nodes: [x_17, relu_12], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_9.run(buf34, primals_35, 262144, grid=grid(262144), stream=stream0)
        del primals_35
        # Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution]
        buf35 = extern_kernels.convolution(buf34, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf35, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf36 = buf35; del buf35  # reuse
        # Topologically Sorted Source Nodes: [x_18, relu_13], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_9.run(buf36, primals_37, 262144, grid=grid(262144), stream=stream0)
        del primals_37
        # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution]
        buf37 = extern_kernels.convolution(buf36, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf37, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf38 = buf37; del buf37  # reuse
        # Topologically Sorted Source Nodes: [x_19, relu_14], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_9.run(buf38, primals_39, 262144, grid=grid(262144), stream=stream0)
        del primals_39
        # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution]
        buf39 = extern_kernels.convolution(buf38, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf39, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf40 = buf39; del buf39  # reuse
        # Topologically Sorted Source Nodes: [x_20, relu_15], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_9.run(buf40, primals_41, 262144, grid=grid(262144), stream=stream0)
        del primals_41
        # Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.convolution]
        buf41 = extern_kernels.convolution(buf40, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf41, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf42 = buf41; del buf41  # reuse
        # Topologically Sorted Source Nodes: [x_21, relu_16], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_9.run(buf42, primals_43, 262144, grid=grid(262144), stream=stream0)
        del primals_43
        # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution]
        buf43 = extern_kernels.convolution(buf42, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf43, (4, 256, 16, 16), (65536, 256, 16, 1))
        buf44 = buf43; del buf43  # reuse
        # Topologically Sorted Source Nodes: [x_22, relu_17], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_9.run(buf44, primals_45, 262144, grid=grid(262144), stream=stream0)
        del primals_45
        # Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.convolution]
        buf45 = extern_kernels.convolution(buf44, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf45, (4, 512, 16, 16), (131072, 256, 16, 1))
        buf46 = buf45; del buf45  # reuse
        # Topologically Sorted Source Nodes: [x_23, relu_18], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_10.run(buf46, primals_47, 524288, grid=grid(524288), stream=stream0)
        del primals_47
        # Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution]
        buf47 = extern_kernels.convolution(buf46, primals_48, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf47, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf48 = buf47; del buf47  # reuse
        # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add]
        triton_poi_fused_add_11.run(buf48, buf28, 524288, grid=grid(524288), stream=stream0)
        # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution]
        buf49 = extern_kernels.convolution(buf48, primals_49, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf49, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf50 = buf49; del buf49  # reuse
        # Topologically Sorted Source Nodes: [x_26, relu_19], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_6.run(buf50, primals_50, 524288, grid=grid(524288), stream=stream0)
        del primals_50
        # Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution]
        buf51 = extern_kernels.convolution(buf50, primals_51, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf51, (4, 128, 32, 32), (131072, 1024, 32, 1))
        buf52 = buf51; del buf51  # reuse
        # Topologically Sorted Source Nodes: [x_27, relu_20], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_6.run(buf52, primals_52, 524288, grid=grid(524288), stream=stream0)
        del primals_52
        # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution]
        buf53 = extern_kernels.convolution(buf52, primals_53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf53, (4, 256, 32, 32), (262144, 1024, 32, 1))
        buf54 = buf53; del buf53  # reuse
        # Topologically Sorted Source Nodes: [x_28, relu_21], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_12.run(buf54, primals_54, 1048576, grid=grid(1048576), stream=stream0)
        del primals_54
        # Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.convolution]
        buf55 = extern_kernels.convolution(buf54, primals_55, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf55, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf56 = buf55; del buf55  # reuse
        # Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.add]
        triton_poi_fused_add_13.run(buf56, buf18, 1048576, grid=grid(1048576), stream=stream0)
        # Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.convolution]
        buf57 = extern_kernels.convolution(buf56, primals_56, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf57, (4, 64, 64, 64), (262144, 4096, 64, 1))
        buf58 = buf57; del buf57  # reuse
        # Topologically Sorted Source Nodes: [x_31, relu_22], Original ATen: [aten.convolution, aten.relu]
        triton_poi_fused_convolution_relu_3.run(buf58, primals_57, 1048576, grid=grid(1048576), stream=stream0)
        del primals_57
        # Topologically Sorted Source Nodes: [x_32], Original
```
ATen: [aten.convolution] buf59 = extern_kernels.convolution(buf58, primals_58, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf59, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf60 = buf59; del buf59 # reuse # Topologically Sorted Source Nodes: [x_32, relu_23], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_3.run(buf60, primals_59, 1048576, grid=grid(1048576), stream=stream0) del primals_59 # Topologically Sorted Source Nodes: [x_33], Original ATen: [aten.convolution] buf61 = extern_kernels.convolution(buf60, primals_60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf61, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf62 = buf61; del buf61 # reuse # Topologically Sorted Source Nodes: [x_33, y], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_14.run(buf62, primals_3, primals_61, 49152, grid=grid(49152), stream=stream0) del primals_61 return (buf9, buf62, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_49, primals_51, primals_53, primals_55, primals_56, primals_58, primals_60, buf1, buf3, buf5, buf7, buf10, buf12, buf14, buf16, buf18, buf20, buf22, buf24, buf26, buf28, buf30, buf32, buf34, buf36, buf38, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf63, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((3, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((64, 6, 3, 3), (54, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((256, 64, 2, 2), (256, 4, 2, 1), 
device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((512, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((256, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_48 = rand_strided((512, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_49 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_50 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_51 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_52 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_53 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_54 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_55 = rand_strided((256, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_56 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_57 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_58 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_59 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_60 = rand_strided((3, 
64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_61 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class CBDNet(nn.Module):

    def __init__(self):
        super(CBDNet, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.E01 = nn.Conv2d(3, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.E02 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.E03 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.E04 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.E05 = nn.Conv2d(32, 3, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS01_layer00 = nn.Conv2d(6, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS01_layer02 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS01_layer03 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS02 = nn.Conv2d(64, 256, kernel_size=[2, 2], stride=(2, 2))
        self.DS02_layer00_cf = nn.Conv2d(256, 128, kernel_size=[1, 1], stride=(1, 1))
        self.DS02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS02_layer02 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS03 = nn.Conv2d(128, 512, kernel_size=[2, 2], stride=(2, 2))
        self.DS03_layer00_cf = nn.Conv2d(512, 256, kernel_size=[1, 1], stride=(1, 1))
        self.DS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.UPS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.UPS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.UPS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.UPS03_layer03 = nn.Conv2d(256, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.USP02 = nn.ConvTranspose2d(512, 128, kernel_size=[2, 2], stride=(2, 2), bias=False)
        self.US02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.US02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.US02_layer02 = nn.Conv2d(128, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.USP01 = nn.ConvTranspose2d(256, 64, kernel_size=[2, 2], stride=(2, 2), bias=False)
        self.US01_layer00 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.US01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.US01_layer02 = nn.Conv2d(64, 3, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))

    def forward(self, input):
        # self.relu is inplace, so each bare self.relu(x) call mutates x.
        x = self.E01(input)
        self.relu(x)
        x = self.E02(x)
        self.relu(x)
        x = self.E03(x)
        self.relu(x)
        x = self.E04(x)
        self.relu(x)
        x = self.E05(x)
        self.relu(x)
        noise_level = x
        x = torch.cat((input, noise_level), dim=1)
        x = self.DS01_layer00(x)
        self.relu(x)
        x = self.DS01_layer01(x)
        self.relu(x)
        x = self.DS01_layer02(x)
        self.relu(x)
        x = self.DS01_layer03(x)
        self.relu(x)
        down1_result = x
        x = self.DS02(down1_result)
        x = self.DS02_layer00_cf(x)
        x = self.DS02_layer00(x)
        self.relu(x)
        x = self.DS02_layer01(x)
        self.relu(x)
        x = self.DS02_layer02(x)
        self.relu(x)
        down2_result = x
        x = self.DS03(down2_result)
        x = self.DS03_layer00_cf(x)
        x = self.DS03_layer00(x)
        self.relu(x)
        x = self.DS03_layer01(x)
        self.relu(x)
        x = self.DS03_layer02(x)
        self.relu(x)
        x = self.UPS03_layer00(x)
        self.relu(x)
        x = self.UPS03_layer01(x)
        self.relu(x)
        x = self.UPS03_layer02(x)
        self.relu(x)
        x = self.UPS03_layer03(x)
        self.relu(x)
        x = self.USP02(x)
        # The legacy torch.add(input, value, other) signature was removed from
        # PyTorch; the alpha keyword gives the same input + 1 * other result.
        x = torch.add(x, down2_result, alpha=1)
        del down2_result
        x = self.US02_layer00(x)
        self.relu(x)
        x = self.US02_layer01(x)
        self.relu(x)
        x = self.US02_layer02(x)
        self.relu(x)
        x = self.USP01(x)
        x = torch.add(x, down1_result, alpha=1)
        del down1_result
        x = self.US01_layer00(x)
        self.relu(x)
        x = self.US01_layer01(x)
        self.relu(x)
        x = self.US01_layer02(x)
        y = torch.add(input, x, alpha=1)
        del x
        return noise_level, y


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
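# Hedged usage sketch for the eager CBDNet above (relies on the modernized
# torch.add calls; runs on CPU, no GPU required):
import torch

model = CBDNet().eval()
x = torch.rand(4, 3, 64, 64)  # same shape as get_inputs() produces
with torch.no_grad():
    noise_level, denoised = model(x)

# Both outputs match the input resolution: the estimated noise map and
# input + predicted residual.
assert noise_level.shape == (4, 3, 64, 64)
assert denoised.shape == (4, 3, 64, 64)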
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, None) tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 6 x0 = xindex % 4096 x2 = xindex // 24576 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 12288 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 6, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-3 + x1) + 12288 * x2), tmp6, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, None) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, 
in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_add_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_poi_fused_add_convolution_14(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_out_ptr0 + x3, None) tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61) = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (3, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (3,), (1,)) assert_size_stride(primals_12, (64, 6, 3, 3), (54, 9, 3, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64,), (1,)) assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (256, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_21, (256,), (1,)) assert_size_stride(primals_22, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_23, (128,), (1,)) assert_size_stride(primals_24, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_25, (128,), (1,)) assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_27, (128,), (1,)) assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_29, (128,), (1,)) assert_size_stride(primals_30, (512, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_31, (512,), (1,)) assert_size_stride(primals_32, (256, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_33, (256,), (1,)) assert_size_stride(primals_34, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_35, (256,), (1,)) assert_size_stride(primals_36, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_37, (256,), (1,)) assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_39, (256,), (1,)) assert_size_stride(primals_40, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_41, (256,), (1,)) assert_size_stride(primals_42, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_43, (256,), (1,)) assert_size_stride(primals_44, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_45, 
(256,), (1,)) assert_size_stride(primals_46, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_47, (512,), (1,)) assert_size_stride(primals_48, (512, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_49, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_50, (128,), (1,)) assert_size_stride(primals_51, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_52, (128,), (1,)) assert_size_stride(primals_53, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_54, (256,), (1,)) assert_size_stride(primals_55, (256, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_56, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_57, (64,), (1,)) assert_size_stride(primals_58, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_59, (64,), (1,)) assert_size_stride(primals_60, (3, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_61, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(524288)](buf3, primals_5, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_0[grid(524288)](buf5, primals_7, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_0[grid(524288)](buf7, primals_9, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf9 = buf8 del buf8 buf63 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(49152)]( buf9, primals_11, buf63, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32) triton_poi_fused_cat_2[grid(98304)](primals_3, buf9, buf10, 98304, XBLOCK=1024, num_warps=4, num_stages=1) buf11 = extern_kernels.convolution(buf10, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf12 = buf11 del buf11 triton_poi_fused_convolution_relu_3[grid(1048576)](buf12, primals_13, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf13 = 
extern_kernels.convolution(buf12, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf14 = buf13 del buf13 triton_poi_fused_convolution_relu_3[grid(1048576)](buf14, primals_15, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf15 = extern_kernels.convolution(buf14, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf16 = buf15 del buf15 triton_poi_fused_convolution_relu_3[grid(1048576)](buf16, primals_17, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf17 = extern_kernels.convolution(buf16, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf18 = buf17 del buf17 triton_poi_fused_convolution_relu_3[grid(1048576)](buf18, primals_19, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf19 = extern_kernels.convolution(buf18, primals_20, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 256, 32, 32), (262144, 1024, 32, 1)) buf20 = buf19 del buf19 triton_poi_fused_convolution_4[grid(1048576)](buf20, primals_21, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_21 buf21 = extern_kernels.convolution(buf20, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf22 = buf21 del buf21 triton_poi_fused_convolution_5[grid(524288)](buf22, primals_23, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_23 buf23 = extern_kernels.convolution(buf22, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf24 = buf23 del buf23 triton_poi_fused_convolution_relu_6[grid(524288)](buf24, primals_25, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_25 buf25 = extern_kernels.convolution(buf24, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf25, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf26 = buf25 del buf25 triton_poi_fused_convolution_relu_6[grid(524288)](buf26, primals_27, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_27 buf27 = extern_kernels.convolution(buf26, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf28 = buf27 del buf27 triton_poi_fused_convolution_relu_6[grid(524288)](buf28, primals_29, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_29 buf29 = extern_kernels.convolution(buf28, primals_30, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 512, 16, 16), (131072, 256, 16, 1)) buf30 = buf29 del buf29 triton_poi_fused_convolution_7[grid(524288)](buf30, primals_31, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_31 buf31 = extern_kernels.convolution(buf30, primals_32, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 256, 16, 16), (65536, 256, 16, 1)) buf32 = buf31 del buf31 triton_poi_fused_convolution_8[grid(262144)](buf32, primals_33, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_33 buf33 = extern_kernels.convolution(buf32, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 256, 16, 16), (65536, 256, 16, 1)) buf34 = buf33 del buf33 triton_poi_fused_convolution_relu_9[grid(262144)](buf34, primals_35, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_35 buf35 = extern_kernels.convolution(buf34, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 256, 16, 16), (65536, 256, 16, 1)) buf36 = buf35 del buf35 triton_poi_fused_convolution_relu_9[grid(262144)](buf36, primals_37, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_37 buf37 = extern_kernels.convolution(buf36, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 256, 16, 16), (65536, 256, 16, 1)) buf38 = buf37 del buf37 triton_poi_fused_convolution_relu_9[grid(262144)](buf38, primals_39, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_39 buf39 = extern_kernels.convolution(buf38, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 256, 16, 16), (65536, 256, 16, 1)) buf40 = buf39 del buf39 triton_poi_fused_convolution_relu_9[grid(262144)](buf40, primals_41, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_41 buf41 = extern_kernels.convolution(buf40, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf41, (4, 256, 16, 16), (65536, 256, 16, 1)) buf42 = buf41 del buf41 triton_poi_fused_convolution_relu_9[grid(262144)](buf42, primals_43, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_43 buf43 = extern_kernels.convolution(buf42, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 256, 16, 16), (65536, 256, 16, 1)) buf44 = buf43 del buf43 triton_poi_fused_convolution_relu_9[grid(262144)](buf44, primals_45, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_45 buf45 = extern_kernels.convolution(buf44, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 512, 16, 16), (131072, 256, 16, 1)) buf46 = buf45 del buf45 triton_poi_fused_convolution_relu_10[grid(524288)](buf46, primals_47, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_47 buf47 = extern_kernels.convolution(buf46, primals_48, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf48 = buf47 del buf47 triton_poi_fused_add_11[grid(524288)](buf48, buf28, 524288, XBLOCK= 1024, num_warps=4, num_stages=1) buf49 = extern_kernels.convolution(buf48, primals_49, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf49, (4, 128, 32, 32), 
(131072, 1024, 32, 1)) buf50 = buf49 del buf49 triton_poi_fused_convolution_relu_6[grid(524288)](buf50, primals_50, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_50 buf51 = extern_kernels.convolution(buf50, primals_51, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf52 = buf51 del buf51 triton_poi_fused_convolution_relu_6[grid(524288)](buf52, primals_52, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_52 buf53 = extern_kernels.convolution(buf52, primals_53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf53, (4, 256, 32, 32), (262144, 1024, 32, 1)) buf54 = buf53 del buf53 triton_poi_fused_convolution_relu_12[grid(1048576)](buf54, primals_54, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_54 buf55 = extern_kernels.convolution(buf54, primals_55, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf55, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf56 = buf55 del buf55 triton_poi_fused_add_13[grid(1048576)](buf56, buf18, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) buf57 = extern_kernels.convolution(buf56, primals_56, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf57, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf58 = buf57 del buf57 triton_poi_fused_convolution_relu_3[grid(1048576)](buf58, primals_57, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_57 buf59 = extern_kernels.convolution(buf58, primals_58, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf59, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf60 = buf59 del buf59 triton_poi_fused_convolution_relu_3[grid(1048576)](buf60, primals_59, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_59 buf61 = extern_kernels.convolution(buf60, primals_60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf61, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf62 = buf61 del buf61 triton_poi_fused_add_convolution_14[grid(49152)](buf62, primals_3, primals_61, 49152, XBLOCK=512, num_warps=4, num_stages=1) del primals_61 return (buf9, buf62, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_49, primals_51, primals_53, primals_55, primals_56, primals_58, primals_60, buf1, buf3, buf5, buf7, buf10, buf12, buf14, buf16, buf18, buf20, buf22, buf24, buf26, buf28, buf30, buf32, buf34, buf36, buf38, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf63) class CBDNetNew(nn.Module): def __init__(self): super(CBDNetNew, self).__init__() self.relu = nn.ReLU(inplace=True) self.E01 = nn.Conv2d(3, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E02 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E03 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E04 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E05 = nn.Conv2d(32, 3, 
kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer00 = nn.Conv2d(6, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer02 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer03 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS02 = nn.Conv2d(64, 256, kernel_size=[2, 2], stride=(2, 2)) self.DS02_layer00_cf = nn.Conv2d(256, 128, kernel_size=[1, 1], stride=(1, 1)) self.DS02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS02_layer02 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03 = nn.Conv2d(128, 512, kernel_size=[2, 2], stride=(2, 2)) self.DS03_layer00_cf = nn.Conv2d(512, 256, kernel_size=[1, 1], stride=(1, 1)) self.DS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.UPS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer03 = nn.Conv2d(256, 512, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.USP02 = nn.ConvTranspose2d(512, 128, kernel_size=[2, 2], stride=(2, 2), bias=False) self.US02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.US02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.US02_layer02 = nn.Conv2d(128, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.USP01 = nn.ConvTranspose2d(256, 64, kernel_size=[2, 2], stride =(2, 2), bias=False) self.US01_layer00 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.US01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.US01_layer02 = nn.Conv2d(64, 3, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) def forward(self, input_0): primals_1 = self.E01.weight primals_2 = self.E01.bias primals_4 = self.E02.weight primals_5 = self.E02.bias primals_6 = self.E03.weight primals_7 = self.E03.bias primals_8 = self.E04.weight primals_9 = self.E04.bias primals_10 = self.E05.weight primals_11 = self.E05.bias primals_12 = self.DS01_layer00.weight primals_13 = self.DS01_layer00.bias primals_14 = self.DS01_layer01.weight primals_15 = self.DS01_layer01.bias primals_16 = self.DS01_layer02.weight primals_17 = self.DS01_layer02.bias primals_18 = self.DS01_layer03.weight primals_19 = self.DS01_layer03.bias primals_20 = self.DS02.weight primals_21 = self.DS02.bias primals_22 = self.DS02_layer00_cf.weight primals_23 = self.DS02_layer00_cf.bias primals_24 = self.DS02_layer00.weight primals_25 = self.DS02_layer00.bias primals_26 = self.DS02_layer01.weight primals_27 = self.DS02_layer01.bias primals_28 = self.DS02_layer02.weight primals_29 = self.DS02_layer02.bias primals_30 = self.DS03.weight primals_31 = self.DS03.bias primals_32 = self.DS03_layer00_cf.weight primals_33 = self.DS03_layer00_cf.bias primals_34 = self.DS03_layer00.weight primals_35 = self.DS03_layer00.bias primals_36 = self.DS03_layer01.weight 
primals_37 = self.DS03_layer01.bias primals_38 = self.DS03_layer02.weight primals_39 = self.DS03_layer02.bias primals_40 = self.UPS03_layer00.weight primals_41 = self.UPS03_layer00.bias primals_42 = self.UPS03_layer01.weight primals_43 = self.UPS03_layer01.bias primals_44 = self.UPS03_layer02.weight primals_45 = self.UPS03_layer02.bias primals_46 = self.UPS03_layer03.weight primals_47 = self.UPS03_layer03.bias primals_48 = self.USP02.weight primals_49 = self.US02_layer00.weight primals_50 = self.US02_layer00.bias primals_51 = self.US02_layer01.weight primals_52 = self.US02_layer01.bias primals_53 = self.US02_layer02.weight primals_54 = self.US02_layer02.bias primals_55 = self.USP01.weight primals_56 = self.US01_layer00.weight primals_57 = self.US01_layer00.bias primals_58 = self.US01_layer01.weight primals_59 = self.US01_layer01.bias primals_60 = self.US01_layer02.weight primals_61 = self.US01_layer02.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61]) return output[0], output[1]
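# Hedged parity check (an illustration, not part of the generated file):
# assumes a CUDA device and that both the eager CBDNet above (with the
# modernized torch.add calls) and CBDNetNew are in scope. The two classes
# register identically named submodules, so state dicts transfer directly.
import torch

eager = CBDNet().cuda().eval()
fused = CBDNetNew().cuda().eval()
fused.load_state_dict(eager.state_dict())  # same parameter names and shapes

x = torch.rand(4, 3, 64, 64, device='cuda')
with torch.no_grad():
    n_eager, y_eager = eager(x)
    n_fused, y_fused = fused(x)

# The Inductor wrapper should agree with eager mode to float32 tolerance.
torch.testing.assert_close(n_fused, n_eager, rtol=1e-4, atol=1e-4)
torch.testing.assert_close(y_fused, y_eager, rtol=1e-4, atol=1e-4)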
delldu/ImageClean
CBDNet
false
1852
[ "MIT" ]
0
ffa5b180d36afb3840c6b36c08a767c520068498
https://github.com/delldu/ImageClean/tree/ffa5b180d36afb3840c6b36c08a767c520068498
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() self.relu = nn.ReLU(inplace=True) self.E01 = nn.Conv2d(3, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E02 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E03 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E04 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E05 = nn.Conv2d(32, 3, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer00 = nn.Conv2d(6, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer02 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer03 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS02 = nn.Conv2d(64, 256, kernel_size=[2, 2], stride=(2, 2)) self.DS02_layer00_cf = nn.Conv2d(256, 128, kernel_size=[1, 1], stride=(1, 1)) self.DS02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS02_layer02 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03 = nn.Conv2d(128, 512, kernel_size=[2, 2], stride=(2, 2)) self.DS03_layer00_cf = nn.Conv2d(512, 256, kernel_size=[1, 1], stride=(1, 1)) self.DS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.UPS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer03 = nn.Conv2d(256, 512, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.USP02 = nn.ConvTranspose2d(512, 128, kernel_size=[2, 2], stride=(2, 2), bias=False) self.US02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.US02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.US02_layer02 = nn.Conv2d(128, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.USP01 = nn.ConvTranspose2d(256, 64, kernel_size=[2, 2], stride =(2, 2), bias=False) self.US01_layer00 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.US01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.US01_layer02 = nn.Conv2d(64, 3, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) def forward(self, input): x = self.E01(input) self.relu(x) x = self.E02(x) self.relu(x) x = self.E03(x) self.relu(x) x = self.E04(x) self.relu(x) x = self.E05(x) self.relu(x) noise_level = x x = torch.cat((input, noise_level), dim=1) x = self.DS01_layer00(x) self.relu(x) x = self.DS01_layer01(x) self.relu(x) x = self.DS01_layer02(x) self.relu(x) x = self.DS01_layer03(x) self.relu(x) down # ... truncated (>4000 chars) for memory efficiency
LinearThreeDeep
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [hiddenNodesL1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # hiddenNodesL1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel 
x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py # Topologically Sorted Source Nodes: [outputNodes], Original ATen: [aten._softmax] # Source node to ATen node mapping: # outputNodes => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_7, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_7, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py # Topologically Sorted Source Nodes: [outputNodes], Original ATen: [aten._softmax] # Source node to ATen node mapping: # outputNodes => div, sum_1 # 
Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # 
Topologically Sorted Source Nodes: [hiddenNodesL1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf11, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [hiddenNodesL2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf10, 256, grid=grid(256), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [hiddenNodesL3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf9, 256, grid=grid(256), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [outputNodes], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [outputNodes], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf7, buf8, 256, grid=grid(256), stream=stream0) del buf7 return (buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0), buf8, primals_8, buf9, primals_6, buf10, primals_4, buf11, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return 
print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class LinearThreeDeep(nn.Module): def __init__(self, inputSize, hiddenSize1, hiddenSize2, hiddenSize3, outputSize): super().__init__() self.inputLinear = nn.Linear(inputSize, hiddenSize1) self.hiddenLinear1 = nn.Linear(hiddenSize1, hiddenSize2) self.hiddenLinear2 = nn.Linear(hiddenSize2, hiddenSize3) self.outputLinear = nn.Linear(hiddenSize3, outputSize) def forward(self, inputNodes): hiddenNodesL1 = F.relu(self.inputLinear(inputNodes)) hiddenNodesL2 = F.relu(self.hiddenLinear1(hiddenNodesL1)) hiddenNodesL3 = F.relu(self.hiddenLinear2(hiddenNodesL2)) outputNodes = F.softmax(self.outputLinear(hiddenNodesL3)) return outputNodes def getParameters(self): return tuple(self.parameters()) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inputSize': 4, 'hiddenSize1': 4, 'hiddenSize2': 4, 'hiddenSize3': 4, 'outputSize': 4}]
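A note on the forward pass above: F.softmax is called without an explicit dim, which PyTorch resolves to dim=1 for a 4-D input (and warns about); the compiled graph's amax/sum over dim [1] confirms that choice. A minimal sketch with the dim made explicit (the explicit argument is my addition, not part of the original module):

import torch
import torch.nn.functional as F

# For a 4-D input the implicit softmax dim resolves to 1, so each
# (batch, :, h, w) slice of the output sums to 1.
x = torch.rand(4, 4, 4, 4)
out = F.softmax(x, dim=1)  # explicit dim avoids the implicit-dim warning
assert torch.allclose(out.sum(dim=1), torch.ones(4, 4, 4))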
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = 
reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5, primals_7, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused__softmax_2[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0 ), buf8, primals_8, buf9, primals_6, buf10, primals_4, buf11 class LinearThreeDeepNew(nn.Module): def __init__(self, inputSize, hiddenSize1, hiddenSize2, hiddenSize3, outputSize): super().__init__() self.inputLinear = nn.Linear(inputSize, hiddenSize1) self.hiddenLinear1 = nn.Linear(hiddenSize1, hiddenSize2) self.hiddenLinear2 = nn.Linear(hiddenSize2, hiddenSize3) self.outputLinear = nn.Linear(hiddenSize3, outputSize) def getParameters(self): return tuple(self.parameters()) def forward(self, input_0): primals_1 = self.inputLinear.weight primals_2 = self.inputLinear.bias primals_4 = self.hiddenLinear1.weight primals_5 = self.hiddenLinear1.bias primals_6 = self.hiddenLinear2.weight primals_7 = self.hiddenLinear2.bias primals_8 = self.outputLinear.weight primals_9 = self.outputLinear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
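A quick equivalence sketch between the eager module and the compiled wrapper (assumes both classes above are in scope and a CUDA device is available, since call() targets cuda:0):

import torch

torch.manual_seed(0)
eager = LinearThreeDeep(4, 4, 4, 4, 4).cuda()
compiled = LinearThreeDeepNew(4, 4, 4, 4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # share weights
x = torch.rand(4, 4, 4, 4, device='cuda')
# Same graph, so outputs should agree to float32 tolerance.
assert torch.allclose(eager(x), compiled(x), atol=1e-6)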
dmechea/PyTorch-CartPole
LinearThreeDeep
false
1853
[ "MIT" ]
0
9f49ac7b2ae59882e5ea66cc8f43f0354a120c49
https://github.com/dmechea/PyTorch-CartPole/tree/9f49ac7b2ae59882e5ea66cc8f43f0354a120c49
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, inputSize, hiddenSize1, hiddenSize2, hiddenSize3, outputSize): super().__init__() self.inputLinear = nn.Linear(inputSize, hiddenSize1) self.hiddenLinear1 = nn.Linear(hiddenSize1, hiddenSize2) self.hiddenLinear2 = nn.Linear(hiddenSize2, hiddenSize3) self.outputLinear = nn.Linear(hiddenSize3, outputSize) def forward(self, inputNodes): hiddenNodesL1 = F.relu(self.inputLinear(inputNodes)) hiddenNodesL2 = F.relu(self.hiddenLinear1(hiddenNodesL1)) hiddenNodesL3 = F.relu(self.hiddenLinear2(hiddenNodesL2)) outputNodes = F.softmax(self.outputLinear(hiddenNodesL3)) return outputNodes def getParameters(self): return tuple(self.parameters()) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inputSize': 4, 'hiddenSize1': 4, 'hiddenSize2': 4, 'hiddenSize3': 4, 'outputSize': 4}]
SoftDiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/gh/cghtdk4mck6ox4iksbiwuwymjyuekmgskfsglq5j7gdpp2ymwbiq.py # Topologically Sorted Source Nodes: [tp, tp_1, sub, fp, fp_1, sub_1, fn, fn_1], Original ATen: [aten.mul, aten.sum, aten.rsub] # Source node to ATen node mapping: # fn => mul_2 # fn_1 => sum_3 # fp => mul_1 # fp_1 => sum_2 # sub => sub # sub_1 => sub_1 # tp => mul # tp_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2, 3]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sub), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2, 3]), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [2, 3]), kwargs = {}) triton_per_fused_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_rsub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 1.0 tmp8 = tmp7 - tmp1 tmp9 = tmp0 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp14 = tmp7 - tmp0 tmp15 = tmp14 * tmp1 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(xmask, tmp16, 0) tmp19 = tl.sum(tmp18, 1)[:, None] tl.store(out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr1 + (x0), tmp13, xmask) tl.store(out_ptr2 + (x0), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/wd/cwdwsm4uqfah4my5p6u3xvnnfxc3e32ficijpvc2khdxcn4puf4l.py # Topologically Sorted Source Nodes: [mul_4, nominator, mul_5, add_1, add_2, denominator, add_4, dc, dc_1, neg], Original ATen: [aten.mul, aten.add, aten.div, aten.mean, aten.neg] # Source node to ATen node mapping: # add_1 => add_1 # add_2 => add_2 # add_4 => add_4 # dc => div # dc_1 => mean # denominator => add_3 # mul_4 => mul_4 # mul_5 => mul_5 # neg => neg # nominator => add # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 1.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %sum_2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %sum_3), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1.0), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, 1e-08), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) triton_per_fused_add_div_mean_mul_neg_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_neg_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor 
from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_neg_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_neg_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp7 = tl.load(in_ptr2 + (r0), None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp6 = tmp2 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = tmp8 + tmp3 tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = tmp4 / tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tmp16 = 16.0 tmp17 = tmp15 / tmp16 tmp18 = -tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tp, tp_1, sub, fp, fp_1, sub_1, fn, fn_1], Original ATen: [aten.mul, aten.sum, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_mul_rsub_sum_0.run(arg0_1, arg1_1, buf0, buf1, buf2, 16, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [mul_4, nominator, mul_5, add_1, add_2, denominator, add_4, dc, dc_1, neg], Original ATen: [aten.mul, aten.add, aten.div, aten.mean, aten.neg] triton_per_fused_add_div_mean_mul_neg_1.run(buf4, buf0, buf1, buf2, 1, 16, grid=grid(1), stream=stream0) del buf0 del buf1 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from 
torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch._C import torch.serialization def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False): """ net_output must be (b, c, x, y(, z))) gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z)) if mask is provided it must have shape (b, 1, x, y(, z))) :param net_output: :param gt: :param axes: can be (, ) = no summation :param mask: mask must be 1 for valid pixels and 0 for invalid pixels :param square: if True then fp, tp and fn will be squared before summation :return: """ if axes is None: axes = tuple(range(2, len(net_output.size()))) shp_x = net_output.shape shp_y = gt.shape with torch.no_grad(): if len(shp_x) != len(shp_y): gt = gt.view((shp_y[0], 1, *shp_y[1:])) if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]): y_onehot = gt else: gt = gt.long() y_onehot = torch.zeros(shp_x, device=net_output.device) y_onehot.scatter_(1, gt, 1) tp = net_output * y_onehot fp = net_output * (1 - y_onehot) fn = (1 - net_output) * y_onehot tn = (1 - net_output) * (1 - y_onehot) if mask is not None: tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1) fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1) fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1) tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1) if square: tp = tp ** 2 fp = fp ** 2 fn = fn ** 2 tn = tn ** 2 if len(axes) > 0: tp = tp.sum(axes, keepdim=False) fp = fp.sum(axes, keepdim=False) fn = fn.sum(axes, keepdim=False) tn = tn.sum(axes, keepdim=False) return tp, fp, fn, tn class SoftDiceLoss(nn.Module): def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.0): """ """ super(SoftDiceLoss, self).__init__() self.do_bg = do_bg self.batch_dice = batch_dice self.apply_nonlin = apply_nonlin self.smooth = smooth def forward(self, x, y, loss_mask=None): shp_x = x.shape if self.batch_dice: axes = [0] + list(range(2, len(shp_x))) else: axes = list(range(2, len(shp_x))) if self.apply_nonlin is not None: x = self.apply_nonlin(x) tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, axes, loss_mask, False) nominator = 2 * tp + self.smooth denominator = 2 * tp + fp + fn + self.smooth dc = nominator / (denominator + 1e-08) if not self.do_bg: if self.batch_dice: dc = dc[1:] else: dc = dc[:, 1:] dc = dc.mean() return -dc def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
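A worked toy check of the loss above (my own example, not from the repo): a perfect prediction drives the per-class dice toward 1, so the returned loss approaches -1.

import torch

loss = SoftDiceLoss()
x = torch.ones(4, 4, 4, 4)
# Per (b, c): tp = 16, fp = fn = 0, so
# dc = (2*16 + 1) / (2*16 + 1 + 1e-8) ~ 1 and the loss is ~ -1.
assert torch.allclose(loss(x, x), torch.tensor(-1.0), atol=1e-6)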
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch._C import torch.serialization assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 1.0 tmp8 = tmp7 - tmp1 tmp9 = tmp0 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp14 = tmp7 - tmp0 tmp15 = tmp14 * tmp1 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(xmask, tmp16, 0) tmp19 = tl.sum(tmp18, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp13, xmask) tl.store(out_ptr2 + x0, tmp19, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_neg_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp7 = tl.load(in_ptr2 + r0, None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp6 = tmp2 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = tmp8 + tmp3 tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = tmp4 / tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tmp16 = 16.0 tmp17 = tmp15 / tmp16 tmp18 = -tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_mul_rsub_sum_0[grid(16)](arg0_1, arg1_1, buf0, buf1, buf2, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_mean_mul_neg_1[grid(1)](buf4, buf0, buf1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False): """ net_output must be (b, c, x, y(, z))) gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z)) if mask is provided it must have shape (b, 1, x, y(, z))) :param net_output: :param gt: :param axes: can be (, ) = no summation :param mask: mask must be 1 for valid pixels and 0 for invalid pixels :param square: if True then fp, 
tp and fn will be squared before summation :return: """ if axes is None: axes = tuple(range(2, len(net_output.size()))) shp_x = net_output.shape shp_y = gt.shape with torch.no_grad(): if len(shp_x) != len(shp_y): gt = gt.view((shp_y[0], 1, *shp_y[1:])) if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]): y_onehot = gt else: gt = gt.long() y_onehot = torch.zeros(shp_x, device=net_output.device) y_onehot.scatter_(1, gt, 1) tp = net_output * y_onehot fp = net_output * (1 - y_onehot) fn = (1 - net_output) * y_onehot tn = (1 - net_output) * (1 - y_onehot) if mask is not None: tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1) fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1) fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1) tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1) if square: tp = tp ** 2 fp = fp ** 2 fn = fn ** 2 tn = tn ** 2 if len(axes) > 0: tp = tp.sum(axes, keepdim=False) fp = fp.sum(axes, keepdim=False) fn = fn.sum(axes, keepdim=False) tn = tn.sum(axes, keepdim=False) return tp, fp, fn, tn class SoftDiceLossNew(nn.Module): def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.0): """ """ super(SoftDiceLossNew, self).__init__() self.do_bg = do_bg self.batch_dice = batch_dice self.apply_nonlin = apply_nonlin self.smooth = smooth def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
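A hedged eager-mode restatement of the two kernels above: the first fuses the three per-(batch, class) spatial reductions into one pass (buf0/buf1/buf2), and the second folds them into the scalar loss.

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
tp = (x * y).sum(dim=(2, 3))         # buf0
fp = (x * (1 - y)).sum(dim=(2, 3))   # buf1
fn = ((1 - x) * y).sum(dim=(2, 3))   # buf2
manual = -((2 * tp + 1.0) / (2 * tp + fp + fn + 1.0 + 1e-08)).mean()
assert torch.allclose(SoftDiceLossNew()(x, y), manual, atol=1e-5)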
dkswxd/Swin-Transformer-Semantic-Segmentation
SoftDiceLoss
false
1855
[ "Apache-2.0" ]
0
6af19736e5492a01d8952d4ee86a6d59b21c2ae1
https://github.com/dkswxd/Swin-Transformer-Semantic-Segmentation/tree/6af19736e5492a01d8952d4ee86a6d59b21c2ae1
import torch import torch.nn as nn import torch._C import torch.serialization def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False): """ net_output must be (b, c, x, y(, z))) gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z)) if mask is provided it must have shape (b, 1, x, y(, z))) :param net_output: :param gt: :param axes: can be (, ) = no summation :param mask: mask must be 1 for valid pixels and 0 for invalid pixels :param square: if True then fp, tp and fn will be squared before summation :return: """ if axes is None: axes = tuple(range(2, len(net_output.size()))) shp_x = net_output.shape shp_y = gt.shape with torch.no_grad(): if len(shp_x) != len(shp_y): gt = gt.view((shp_y[0], 1, *shp_y[1:])) if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]): y_onehot = gt else: gt = gt.long() y_onehot = torch.zeros(shp_x, device=net_output.device) y_onehot.scatter_(1, gt, 1) tp = net_output * y_onehot fp = net_output * (1 - y_onehot) fn = (1 - net_output) * y_onehot tn = (1 - net_output) * (1 - y_onehot) if mask is not None: tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1) fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1) fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1) tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1) if square: tp = tp ** 2 fp = fp ** 2 fn = fn ** 2 tn = tn ** 2 if len(axes) > 0: tp = tp.sum(axes, keepdim=False) fp = fp.sum(axes, keepdim=False) fn = fn.sum(axes, keepdim=False) tn = tn.sum(axes, keepdim=False) return tp, fp, fn, tn class Model(nn.Module): def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.0): """ """ super().__init__() self.do_bg = do_bg self.batch_dice = batch_dice self.apply_nonlin = apply_nonlin self.smooth = smooth def forward(self, x, y, loss_mask=None): shp_x = x.shape if self.batch_dice: axes = [0] + list(range(2, len(shp_x))) else: axes = list(range(2, len(shp_x))) if self.apply_nonlin is not None: x = self.apply_nonlin(x) tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, axes, loss_mask, False) nominator = 2 * tp + self.smooth denominator = 2 * tp + fp + fn + self.smooth dc = nominator / (denominator + 1e-08) if not self.do_bg: if self.batch_dice: dc = dc[1:] else: dc = dc[:, 1:] dc = dc.mean() return -dc def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
GCNLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/jw/cjwdgjmgprkeplqkeqfp62fftsyzegxwif7tauvk3csicpqty4l7.py # Topologically Sorted Source Nodes: [num_neighbors, node_feats_2], Original ATen: [aten.sum, aten.div] # Source node to ATen node mapping: # node_feats_2 => div # num_neighbors => sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, %sum_1), kwargs = {}) triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = 
tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(in_out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [node_feats], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [node_feats_1], Original ATen: [aten.bmm] extern_kernels.bmm(primals_1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf1) del buf0 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [num_neighbors, node_feats_2], Original ATen: [aten.sum, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sum_0.run(buf2, primals_1, 64, grid=grid(64), stream=stream0) return (buf2, primals_1, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GCNLayer(nn.Module): def __init__(self, c_in, c_out): super().__init__() self.projection = nn.Linear(c_in, c_out) def forward(self, nodes_feats, adj_matrix): num_neighbors = adj_matrix.sum(dim=-1, keepdims=True) node_feats = self.projection(nodes_feats) node_feats = torch.bmm(adj_matrix, node_feats) node_feats = node_feats / num_neighbors return node_feats def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'c_in': 4, 'c_out': 4}]
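One caveat worth flagging: node_feats is divided by the per-row neighbor count, so an all-zero adjacency row produces a division by zero (NaN). The random inputs from get_inputs() never hit this; a common remedy, sketched below as my addition rather than part of the module, is to add self-loops:

import torch

layer = GCNLayer(c_in=4, c_out=4)
feats = torch.rand(4, 4, 4)
adj = torch.randint(0, 2, (4, 4, 4)).float()
adj = (adj + torch.eye(4)).clamp(max=1.0)  # hypothetical self-loop fix
out = layer(feats, adj)
assert not torch.isnan(out).any()  # every row sum is now >= 1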
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_1, reinterpret_tensor(buf0, (4, 4, 4), ( 16, 4, 1), 0), out=buf1) del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_div_sum_0[grid(64)](buf2, primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf2, primals_1, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0) class GCNLayerNew(nn.Module): def __init__(self, c_in, c_out): super().__init__() self.projection = nn.Linear(c_in, c_out) def forward(self, input_0, input_1): primals_2 = self.projection.weight primals_3 = self.projection.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
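Note that the fused kernel never materializes num_neighbors: it reloads the four adjacency entries of each row (tmp1..tmp6) and re-sums them inline. The division commutes with the bmm row-wise, as this small algebra sketch (independent of the compiled code) shows:

import torch

adj = torch.rand(4, 4, 4)
feats = torch.rand(4, 4, 4)
deg = adj.sum(dim=-1, keepdim=True)
a = torch.bmm(adj, feats) / deg          # divide after aggregation
b = torch.bmm(adj / deg, feats)          # row-normalize first
assert torch.allclose(a, b, atol=1e-6)   # same mean aggregation either way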
dogeplusplus/sandbox
GCNLayer
false
1856
[ "MIT" ]
0
c9041c06da9454f6c3cec622abbbf918c9f13bdc
https://github.com/dogeplusplus/sandbox/tree/c9041c06da9454f6c3cec622abbbf918c9f13bdc
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, c_in, c_out): super().__init__() self.projection = nn.Linear(c_in, c_out) def forward(self, nodes_feats, adj_matrix): num_neighbors = adj_matrix.sum(dim=-1, keepdims=True) node_feats = self.projection(nodes_feats) node_feats = torch.bmm(adj_matrix, node_feats) node_feats = node_feats / num_neighbors return node_feats def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4]
DilateConv
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [4, 4], [4, 4], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class DilateConv(nn.Module): """ d_rate: dilation rate H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\\_size[0] - 1) - 1) / stride[0] + 1) set kernel size to 3, stride to 1, padding==d_rate ==> spatial size kept """ def __init__(self, d_rate, in_ch, out_ch): super(DilateConv, self).__init__() self.d_conv = nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=d_rate, dilation=d_rate) def forward(self, x): return self.d_conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_rate': 4, 'in_ch': 4, 'out_ch': 4}]
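Plugging the module's test configuration into the docstring formula confirms the "spatial size kept" claim: H_out = floor((4 + 2*4 - 4*(3 - 1) - 1) / 1 + 1) = 4. A quick check:

import torch

conv = DilateConv(d_rate=4, in_ch=4, out_ch=4)
x = torch.rand(4, 4, 4, 4)
# padding == dilation with k=3, stride=1 keeps H and W unchanged
assert conv(x).shape == x.shape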
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(4, 4), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class DilateConvNew(nn.Module): """ d_rate: dilation rate H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\\_size[0] - 1) - 1) / stride[0] + 1) set kernel size to 3, stride to 1, padding==d_rate ==> spatial size kept """ def __init__(self, d_rate, in_ch, out_ch): super(DilateConvNew, self).__init__() self.d_conv = nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=d_rate, dilation=d_rate) def forward(self, input_0): primals_1 = self.d_conv.weight primals_2 = self.d_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
donghaW/RCF-pytorch
DilateConv
false
1857
[ "MIT" ]
0
6380209ef747abefa87637e60d33369ba423814d
https://github.com/donghaW/RCF-pytorch/tree/6380209ef747abefa87637e60d33369ba423814d
import torch import torch.nn as nn class Model(nn.Module): """ d_rate: dilation rate H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\\_size[0] - 1) - 1) / stride[0] + 1) set kernel size to 3, stride to 1, padding==d_rate ==> spatial size kept """ def __init__(self, d_rate, in_ch, out_ch): super().__init__() self.d_conv = nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=d_rate, dilation=d_rate) def forward(self, x): return self.d_conv(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4, 4]
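For context on why the padding must grow with d_rate (my derivation, not from the repo): a k=3 tap with dilation d spans d*(k-1)+1 input pixels, so the d_rate=4 kernel covers 9 pixels, more than the 4-pixel test input, and padding=4 compensates.

def effective_extent(kernel_size: int, dilation: int) -> int:
    # Span of one dilated kernel tap over the input, in pixels.
    return dilation * (kernel_size - 1) + 1

assert effective_extent(3, 1) == 3  # ordinary conv
assert effective_extent(3, 4) == 9  # the d_rate=4 case above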
GDL
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/7o/c7oz33m5hjjfbr5kw2ssk753oov7j2di4j6ikicukj7lu2b5xign.py # Topologically Sorted Source Nodes: [tp, tp_1, sum_5, sub, fp, fp_1, sub_1, fn, fn_1], Original ATen: [aten.mul, aten.sum, aten.rsub] # Source node to ATen node mapping: # fn => mul_2 # fn_1 => sum_3 # fp => mul_1 # fp_1 => sum_2 # sub => sub # sub_1 => sub_1 # sum_5 => sum_5 # tp => mul # tp_1 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2, 3]), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [2, 3]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sub), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2, 3]), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [2, 3]), kwargs = {}) triton_per_fused_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_rsub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = 1.0 tmp8 = tmp7 - tmp1 tmp9 = tmp0 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp14 = tmp7 - tmp0 tmp15 = tmp14 * tmp1 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(xmask, tmp16, 0) tmp19 = tl.sum(tmp18, 1)[:, None] tmp20 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp22 = tl.where(xmask, tmp20, 0) tmp23 = tl.sum(tmp22, 1)[:, None] tl.store(out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr1 + (x0), tmp13, xmask) tl.store(out_ptr2 + (x0), tmp19, xmask) tl.store(out_ptr3 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/o5/co5herzzwwbihct6zo7bs7dh3323o534vx66u7xql7y5vej3jbn3.py # Topologically Sorted Source Nodes: [volumes, tp_2, tp_3, mul_4, add_1, mul_5, fp_2, fp_3, add_2, fn_2, fn_3, add_3, add_4, dc, dc_1, neg, mul_6], Original ATen: [aten.add, aten.div, aten.sum, aten.mul, aten.mean, aten.neg] # Source node to ATen node mapping: # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # add_4 => add_4 # dc => div_3 # dc_1 => mean # fn_2 => div_2 # fn_3 => sum_8 # fp_2 => div_1 # fp_3 => sum_7 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # neg => neg # tp_2 => div # tp_3 => sum_6 # volumes => add # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {}) # %sum_6 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div, [1]), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_6, 2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 1.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_6, 2), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %add), kwargs = {}) # %sum_7 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div_1, [1]), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %sum_7), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %add), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div_2, [1]), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %sum_8), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, 1.0), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div_3,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, 1.0), kwargs = {}) triton_per_fused_add_div_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_neg_sum_1(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (2 + (4*r0)), None, 
eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr3 + (4*r0), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp39 = tl.load(in_ptr3 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr3 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp2 = 1e-06 tmp3 = tmp1 + tmp2 tmp4 = tmp0 / tmp3 tmp7 = tmp6 + tmp2 tmp8 = tmp5 / tmp7 tmp9 = tmp4 + tmp8 tmp12 = tmp11 + tmp2 tmp13 = tmp10 / tmp12 tmp14 = tmp9 + tmp13 tmp17 = tmp16 + tmp2 tmp18 = tmp15 / tmp17 tmp19 = tmp14 + tmp18 tmp20 = 2.0 tmp21 = tmp19 * tmp20 tmp23 = tmp22 / tmp3 tmp25 = tmp24 / tmp7 tmp26 = tmp23 + tmp25 tmp28 = tmp27 / tmp12 tmp29 = tmp26 + tmp28 tmp31 = tmp30 / tmp17 tmp32 = tmp29 + tmp31 tmp33 = tmp21 + tmp32 tmp35 = tmp34 / tmp3 tmp37 = tmp36 / tmp7 tmp38 = tmp35 + tmp37 tmp40 = tmp39 / tmp12 tmp41 = tmp38 + tmp40 tmp43 = tmp42 / tmp17 tmp44 = tmp41 + tmp43 tmp45 = tmp33 + tmp44 tmp46 = 1.0 tmp47 = tmp21 + tmp46 tmp48 = tmp45 + tmp46 tmp49 = tmp47 / tmp48 tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK]) tmp52 = tl.sum(tmp50, 1)[:, None] tmp53 = 4.0 tmp54 = tmp52 / tmp53 tmp55 = -tmp54 tmp56 = tmp55 * tmp46 tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp56, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tp, tp_1, sum_5, sub, fp, fp_1, sub_1, fn, fn_1], Original ATen: [aten.mul, aten.sum, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_mul_rsub_sum_0.run(arg0_1, arg1_1, buf0, buf3, buf5, buf1, 16, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 buf7 = empty_strided_cuda((), (), torch.float32) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [volumes, tp_2, tp_3, mul_4, add_1, mul_5, fp_2, fp_3, add_2, fn_2, fn_3, add_3, add_4, dc, dc_1, neg, mul_6], Original ATen: [aten.add, aten.div, aten.sum, aten.mul, aten.mean, aten.neg] triton_per_fused_add_div_mean_mul_neg_sum_1.run(buf8, buf0, buf1, buf3, buf5, 1, 4, grid=grid(1), stream=stream0) del buf0 del buf1 del buf3 del buf5 return (buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
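For orientation, the compiled entry point above can also be driven directly instead of through benchmark_compiled_module. A minimal sketch, assuming a CUDA device and the two contiguous (4, 4, 4, 4) float32 inputs the stride guards expect; the names probs and target are illustrative, not from the source:

import torch

# call() empties the argument list it receives, so build a fresh list per invocation.
probs = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.rand(4, 4, 4, 4, device='cuda')
loss = call([probs, target])[0]  # call returns a 1-tuple holding a 0-d tensor
print(loss.item())               # negated mean generalized dice score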
import torch
import torch.nn as nn
import torch._C
import torch.serialization


def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
    """
    net_output must be (b, c, x, y(, z))
    gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
    if mask is provided it must have shape (b, 1, x, y(, z))
    :param net_output:
    :param gt:
    :param axes: can be (, ) = no summation
    :param mask: mask must be 1 for valid pixels and 0 for invalid pixels
    :param square: if True then fp, tp and fn will be squared before summation
    :return:
    """
    if axes is None:
        axes = tuple(range(2, len(net_output.size())))
    shp_x = net_output.shape
    shp_y = gt.shape
    with torch.no_grad():
        if len(shp_x) != len(shp_y):
            gt = gt.view((shp_y[0], 1, *shp_y[1:]))
        if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
            y_onehot = gt
        else:
            gt = gt.long()
            y_onehot = torch.zeros(shp_x, device=net_output.device)
            y_onehot.scatter_(1, gt, 1)
    tp = net_output * y_onehot
    fp = net_output * (1 - y_onehot)
    fn = (1 - net_output) * y_onehot
    tn = (1 - net_output) * (1 - y_onehot)
    if mask is not None:
        tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)
        fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)
        fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)
        tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1)
    if square:
        tp = tp ** 2
        fp = fp ** 2
        fn = fn ** 2
        tn = tn ** 2
    if len(axes) > 0:
        tp = tp.sum(axes, keepdim=False)
        fp = fp.sum(axes, keepdim=False)
        fn = fn.sum(axes, keepdim=False)
        tn = tn.sum(axes, keepdim=False)
    return tp, fp, fn, tn


class GDL(nn.Module):

    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
                 smooth=1.0, square=False, square_volumes=False, loss_weight=1.0):
        """
        square_volumes will square the weight term. The paper recommends
        square_volumes=True; I don't (just an intuition)
        """
        super(GDL, self).__init__()
        self.square_volumes = square_volumes
        self.square = square
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth
        self.loss_weight = loss_weight

    def forward(self, x, y, loss_mask=None):
        shp_x = x.shape
        shp_y = y.shape
        if self.batch_dice:
            axes = [0] + list(range(2, len(shp_x)))
        else:
            axes = list(range(2, len(shp_x)))
        if len(shp_x) != len(shp_y):
            y = y.view((shp_y[0], 1, *shp_y[1:]))
        if all([(i == j) for i, j in zip(x.shape, y.shape)]):
            y_onehot = y
        else:
            gt = y.long()
            y_onehot = torch.zeros(shp_x)
            if x.device.type == 'cuda':
                # the one-hot buffer must live on the same device as gt
                # before scatter_ can fill it
                y_onehot = y_onehot.to(x.device)
            y_onehot.scatter_(1, gt, 1)
        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)
        if not self.do_bg:
            x = x[:, 1:]
            y_onehot = y_onehot[:, 1:]
        tp, fp, fn, _ = get_tp_fp_fn_tn(x, y_onehot, axes, loss_mask, self.square)
        volumes = y_onehot.sum(axes) + 1e-06
        if self.square_volumes:
            volumes = volumes ** 2
        tp = tp / volumes
        fp = fp / volumes
        fn = fn / volumes
        if self.batch_dice:
            axis = 0
        else:
            axis = 1
        tp = tp.sum(axis, keepdim=False)
        fp = fp.sum(axis, keepdim=False)
        fn = fn.sum(axis, keepdim=False)
        dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth)
        dc = dc.mean()
        return -dc * self.loss_weight


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
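A minimal eager-mode usage sketch for the GDL module above, under the default constructor arguments from get_init_inputs; because the two inputs already share a shape, the one-hot scatter branch is skipped and y is used directly:

import torch

loss_fn = GDL()                 # defaults: smooth=1.0, do_bg=True, loss_weight=1.0
pred = torch.rand(4, 4, 4, 4)   # (b, c, x, y) soft predictions
gt = torch.rand(4, 4, 4, 4)     # same shape, so it is treated as y_onehot
loss = loss_fn(pred, gt)
print(loss)                     # 0-d tensor; -mean(dice) * loss_weight, hence negative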
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch._C
import torch.serialization

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


# Stage 1: for each (batch, class) pair, reduce over the 16 spatial elements to
# get sum(x*y) = tp, sum(x*(1-y)) = fp, sum((1-x)*y) = fn, and sum(y) = class volume.
@triton.jit
def triton_per_fused_mul_rsub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
        out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, 0)
    tmp6 = tl.sum(tmp5, 1)[:, None]
    tmp7 = 1.0
    tmp8 = tmp7 - tmp1
    tmp9 = tmp0 * tmp8
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp12 = tl.where(xmask, tmp10, 0)
    tmp13 = tl.sum(tmp12, 1)[:, None]
    tmp14 = tmp7 - tmp0
    tmp15 = tmp14 * tmp1
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.where(xmask, tmp16, 0)
    tmp19 = tl.sum(tmp18, 1)[:, None]
    tmp20 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp22 = tl.where(xmask, tmp20, 0)
    tmp23 = tl.sum(tmp22, 1)[:, None]
    tl.store(out_ptr0 + x0, tmp6, xmask)
    tl.store(out_ptr1 + x0, tmp13, xmask)
    tl.store(out_ptr2 + x0, tmp19, xmask)
    tl.store(out_ptr3 + x0, tmp23, xmask)


# Stage 2: divide tp/fp/fn by (volume + 1e-06), fold the 4 classes together,
# form dc = (2*tp + 1) / (2*tp + fp + fn + 1), and store -mean(dc) as the loss.
@triton.jit
def triton_per_fused_add_div_mean_mul_neg_sum_1(in_out_ptr1, in_ptr0,
        in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr3 + 4 * r0, None, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr3 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr3 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr3 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp2 = 1e-06
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 / tmp3
    tmp7 = tmp6 + tmp2
    tmp8 = tmp5 / tmp7
    tmp9 = tmp4 + tmp8
    tmp12 = tmp11 + tmp2
    tmp13 = tmp10 / tmp12
    tmp14 = tmp9 + tmp13
    tmp17 = tmp16 + tmp2
    tmp18 = tmp15 / tmp17
    tmp19 = tmp14 + tmp18
    tmp20 = 2.0
    tmp21 = tmp19 * tmp20
    tmp23 = tmp22 / tmp3
    tmp25 = tmp24 / tmp7
    tmp26 = tmp23 + tmp25
    tmp28 = tmp27 / tmp12
    tmp29 = tmp26 + tmp28
    tmp31 = tmp30 / tmp17
    tmp32 = tmp29 + tmp31
    tmp33 = tmp21 + tmp32
    tmp35 = tmp34 / tmp3
    tmp37 = tmp36 / tmp7
    tmp38 = tmp35 + tmp37
    tmp40 = tmp39 / tmp12
    tmp41 = tmp38 + tmp40
    tmp43 = tmp42 / tmp17
    tmp44 = tmp41 + tmp43
    tmp45 = tmp33 + tmp44
    tmp46 = 1.0
    tmp47 = tmp21 + tmp46
    tmp48 = tmp45 + tmp46
    tmp49 = tmp47 / tmp48
    tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK])
    tmp52 = tl.sum(tmp50, 1)[:, None]
    tmp53 = 4.0
    tmp54 = tmp52 / tmp53
    tmp55 = -tmp54
    tmp56 = tmp55 * tmp46
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp56, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # per-(b, c) partial sums: tp, fp, fn, and the class volumes
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_mul_rsub_sum_0[grid(16)](arg0_1, arg1_1, buf0,
            buf3, buf5, buf1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        # 0-d output buffer for the final scalar loss
        buf7 = empty_strided_cuda((), (), torch.float32)
        buf8 = buf7
        del buf7
        triton_per_fused_add_div_mean_mul_neg_sum_1[grid(1)](buf8, buf0,
            buf1, buf3, buf5, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
        del buf1
        del buf3
        del buf5
    return buf8,


def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
    """
    net_output must be (b, c, x, y(, z))
    gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
    if mask is provided it must have shape (b, 1, x, y(, z))
    :param net_output:
    :param gt:
    :param axes: can be (, ) = no summation
    :param mask: mask must be 1 for valid pixels and 0 for invalid pixels
    :param square: if True then fp, tp and fn will be squared before summation
    :return:
    """
    if axes is None:
        axes = tuple(range(2, len(net_output.size())))
    shp_x = net_output.shape
    shp_y = gt.shape
    with torch.no_grad():
        if len(shp_x) != len(shp_y):
            gt = gt.view((shp_y[0], 1, *shp_y[1:]))
        if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
            y_onehot = gt
        else:
            gt = gt.long()
            y_onehot = torch.zeros(shp_x, device=net_output.device)
            y_onehot.scatter_(1, gt, 1)
    tp = net_output * y_onehot
    fp = net_output * (1 - y_onehot)
    fn = (1 - net_output) * y_onehot
    tn = (1 - net_output) * (1 - y_onehot)
    if mask is not None:
        tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)
        fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)
        fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)
        tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1)
    if square:
        tp = tp ** 2
        fp = fp ** 2
        fn = fn ** 2
        tn = tn ** 2
    if len(axes) > 0:
        tp = tp.sum(axes, keepdim=False)
        fp = fp.sum(axes, keepdim=False)
        fn = fn.sum(axes, keepdim=False)
        tn = tn.sum(axes, keepdim=False)
    return tp, fp, fn, tn


class GDLNew(nn.Module):

    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
                 smooth=1.0, square=False, square_volumes=False, loss_weight=1.0):
        """
        square_volumes will square the weight term. The paper recommends
        square_volumes=True; I don't (just an intuition)
        """
        super(GDLNew, self).__init__()
        self.square_volumes = square_volumes
        self.square = square
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth
        self.loss_weight = loss_weight

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
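Assuming a CUDA device, a plausible parity check (not part of the source) between the eager GDL and the compiled GDLNew wrapper above:

import torch

pred = torch.rand(4, 4, 4, 4, device='cuda')
gt = torch.rand(4, 4, 4, 4, device='cuda')
eager = GDL()(pred, gt)
compiled = GDLNew()(pred, gt)   # forwards through call() and the two fused kernels
assert torch.allclose(eager, compiled, atol=1e-5)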
dkswxd/Swin-Transformer-Semantic-Segmentation
GDL
false
1858
[ "Apache-2.0" ]
0
6af19736e5492a01d8952d4ee86a6d59b21c2ae1
https://github.com/dkswxd/Swin-Transformer-Semantic-Segmentation/tree/6af19736e5492a01d8952d4ee86a6d59b21c2ae1
import torch
import torch.nn as nn
import torch._C
import torch.serialization


def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
    """
    net_output must be (b, c, x, y(, z))
    gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
    if mask is provided it must have shape (b, 1, x, y(, z))
    :param net_output:
    :param gt:
    :param axes: can be (, ) = no summation
    :param mask: mask must be 1 for valid pixels and 0 for invalid pixels
    :param square: if True then fp, tp and fn will be squared before summation
    :return:
    """
    if axes is None:
        axes = tuple(range(2, len(net_output.size())))
    shp_x = net_output.shape
    shp_y = gt.shape
    with torch.no_grad():
        if len(shp_x) != len(shp_y):
            gt = gt.view((shp_y[0], 1, *shp_y[1:]))
        if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
            y_onehot = gt
        else:
            gt = gt.long()
            y_onehot = torch.zeros(shp_x, device=net_output.device)
            y_onehot.scatter_(1, gt, 1)
    tp = net_output * y_onehot
    fp = net_output * (1 - y_onehot)
    fn = (1 - net_output) * y_onehot
    tn = (1 - net_output) * (1 - y_onehot)
    if mask is not None:
        tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)
        fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)
        fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)
        tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1)
    if square:
        tp = tp ** 2
        fp = fp ** 2
        fn = fn ** 2
        tn = tn ** 2
    if len(axes) > 0:
        tp = tp.sum(axes, keepdim=False)
        fp = fp.sum(axes, keepdim=False)
        fn = fn.sum(axes, keepdim=False)
        tn = tn.sum(axes, keepdim=False)
    return tp, fp, fn, tn


class Model(nn.Module):

    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
                 smooth=1.0, square=False, square_volumes=False, loss_weight=1.0):
        """
        square_volumes will square the weight term. The paper recommends
        square_volumes=True; I don't (just an intuition)
        """
        super().__init__()
        self.square_volumes = square_volumes
        self.square = square
        self.do_bg = do_bg
        self.batch_dice = batch_dice
        self.apply_nonlin = apply_nonlin
        self.smooth = smooth
        self.loss_weight = loss_weight

    def forward(self, x, y, loss_mask=None):
        shp_x = x.shape
        shp_y = y.shape
        if self.batch_dice:
            axes = [0] + list(range(2, len(shp_x)))
        else:
            axes = list(range(2, len(shp_x)))
        if len(shp_x) != len(shp_y):
            y = y.view((shp_y[0], 1, *shp_y[1:]))
        if all([(i == j) for i, j in zip(x.shape, y.shape)]):
            y_onehot = y
        else:
            gt = y.long()
            y_onehot = torch.zeros(shp_x)
            if x.device.type == 'cuda':
                # the one-hot buffer must live on the same device as gt
                # before scatter_ can fill it
                y_onehot = y_onehot.to(x.device)
            y_onehot.scatter_(1, gt, 1)
        if self.apply_nonlin is not None:
            x = self.apply_nonlin(x)
        if not self.do_bg:
            x = x[:, 1:]
            y_onehot = y_onehot[:, 1:]
        tp, fp, fn, _ = get_tp_fp_fn_tn(x, y_onehot, axes, loss_mask, self.square)
        volumes = y_onehot.sum(axes) + 1e-06
        if self.square_volumes:
            volumes = volumes ** 2
        tp = tp / volumes
        fp = fp / volumes
        fn = fn / volumes
        if self.batch_dice:
            axis = 0
        else:
            axis = 1
        tp = tp.sum(axis, keepdim=False)
        fp = fp.sum(axis, keepdim=False)
        fn = fn.sum(axis, keepdim=False)
        dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth)
        dc = dc.mean()
        return -dc * # ... truncated (>4000 chars) for memory efficiency
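The truncated tail appears to mirror GDL.forward from the python_code field above, so the dice arithmetic can still be checked by hand against the eager class. A toy sketch with assumed values (one class, two pixels, default smooth=1.0):

import torch

pred = torch.tensor([[[[0.8, 0.4]]]])  # (b=1, c=1, x=1, y=2)
gt = torch.tensor([[[[1.0, 0.0]]]])
loss = GDL()(pred, gt)
# tp=0.8, fp=0.4, fn=0.2, volume=1 (+1e-06):
# dc = (2*0.8 + 1) / (2*0.8 + 0.4 + 0.2 + 1) = 2.6 / 3.2 = 0.8125
assert abs(loss.item() + 0.8125) < 1e-4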
ImageCleanModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/3d/c3dyw6akmjhpp35jaqq3z7ozrdt7ywexn7u7aab2ekphhcccz3vm.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_5 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %relu_4], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 98304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 4096) % 6 x0 = xindex % 4096 x2 = (xindex // 24576) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (12288*x2)), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 6, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-3) + x1)) + (12288*x2)), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + ((-3) + x1), tmp6, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ow/cownydwjrbxdahpvenvupljtyufltq67f2fj5gt5o4scl4zayria.py # Topologically Sorted Source Nodes: [conv2d_5, x_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_5 => convolution_5 # x_6 => relu_5 # Graph 
fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/pm/cpmebq3upbvorlknkawxxg47exa2esmp72fr4oergswfas6nzozi.py # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_10 => convolution_9 # Graph fragment: # %convolution_9 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_8, %primals_20, %primals_21, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/2w/c2wfghmr7ifgp5lwp4xbh6uilviyquukn5atkpgsuzqigf2neqhl.py # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_11 => convolution_10 # Graph fragment: # %convolution_10 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_9, %primals_22, %primals_23, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/q3/cq3dhejhbflsq6b3s24hodc5wf6onjm5p2hkf2ioehmsukht5nqn.py # Topologically Sorted Source Nodes: [conv2d_11, x_12], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_11 => convolution_11 # x_12 => relu_9 # Graph fragment: # %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {}) triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/kd/ckdlwibiqaazskvdjvxwkd4indaw5sheqpx2sp4xc2kozraimzub.py # Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_15 => convolution_14 # Graph fragment: # %convolution_14 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_11, %primals_30, %primals_31, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers 
import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/fq/cfqx26sdcndsc5gatytwl4euxap46grc7jzzcn5mvscdw6nqn7df.py # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_16 => convolution_15 # Graph fragment: # %convolution_15 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_14, %primals_32, %primals_33, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/l7/cl7bz6ohn2mrdkd6u6pn34feultswfgsfww5c4mlqufw3ceah7ms.py # Topologically Sorted Source Nodes: [conv2d_16, x_17], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_16 => convolution_16 # x_17 => relu_12 # Graph fragment: # %convolution_16 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_15, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_12 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_16,), kwargs = {}) triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/a6/ca6awxcje2i7eam7k7hhbngb5arvbk47bz6rxrralf2ykykdcxne.py # Topologically Sorted Source Nodes: [conv2d_22, x_23], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_22 => convolution_22 # x_23 => relu_18 # Graph fragment: # %convolution_22 : 
[num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_17, %primals_46, %primals_47, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_18 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_22,), kwargs = {}) triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/xt/cxtft4hhlq5frjmrjznvv5fbmoy47cdz3ukveuea4agzx6kpgjf5.py # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add] # Source node to ATen node mapping: # x_25 => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_23, %relu_11), kwargs = {}) triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (x0), None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/lv/clvbt33fgl6tz5gt2yhczjt3vfnqg42hfa6zzs2n22qa2ou2lm4j.py # Topologically Sorted Source Nodes: [conv2d_25, x_28], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_25 => convolution_26 # x_28 => relu_21 # Graph fragment: # %convolution_26 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_20, %primals_53, %primals_54, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_21 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_26,), kwargs = {}) triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/y6/cy6gjd2h3soz3je2nnahvlj34xy2he6ak4idn7hv34stk63ito3c.py # Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.add] # Source node to ATen node mapping: # x_30 => add_1 # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_27, %relu_8), kwargs = {}) triton_poi_fused_add_12 = async_compile.triton('triton_poi_fused_add_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (x0), None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/i5/ci5nruo7op4b4ryylrbr6xopw5avdv6wbkjcclosup2d6uh37r6e.py # Topologically Sorted Source Nodes: [x_33, y, clamp], Original ATen: [aten.convolution, aten.add, aten.clamp, aten.ge, aten.le, aten.logical_and] # Source node to ATen node mapping: # clamp => clamp_max, clamp_min # x_33 => convolution_30 # y => add_2 # Graph fragment: # %convolution_30 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_23, %primals_60, %primals_61, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %convolution_30), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_2, 0.0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_2, 0.0), kwargs = {}) # %le : [num_users=1] = 
call_function[target=torch.ops.aten.le.Scalar](args = (%add_2, 1.0), kwargs = {}) # %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {}) triton_poi_fused_add_clamp_convolution_ge_le_logical_and_13 = async_compile.triton('triton_poi_fused_add_clamp_convolution_ge_le_logical_and_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_convolution_ge_le_logical_and_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_convolution_ge_le_logical_and_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x3), None) tmp2 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 1.0 tmp8 = triton_helpers.minimum(tmp6, tmp7) tmp9 = tmp4 >= tmp5 tmp10 = tmp4 <= tmp7 tmp11 = tmp9 & tmp10 tl.store(out_ptr0 + (x3), tmp8, None) tl.store(out_ptr1 + (x3), tmp11, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/pz/cpz2mf53nsbgomqucnhnmpjmugy2wxofag7ddkjo3uawvsnktnsj.py # Topologically Sorted Source Nodes: [conv2d_4, x_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_4 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %le_20 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_14 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_14', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 49152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 3 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61 = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32, ), (1, )) assert_size_stride(primals_10, (3, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (3, ), (1, )) assert_size_stride(primals_12, (64, 6, 3, 3), (54, 9, 3, 1)) assert_size_stride(primals_13, (64, ), (1, )) assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (64, ), 
(1, )) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64, ), (1, )) assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (64, ), (1, )) assert_size_stride(primals_20, (256, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_21, (256, ), (1, )) assert_size_stride(primals_22, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_23, (128, ), (1, )) assert_size_stride(primals_24, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_25, (128, ), (1, )) assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_27, (128, ), (1, )) assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_29, (128, ), (1, )) assert_size_stride(primals_30, (512, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_31, (512, ), (1, )) assert_size_stride(primals_32, (256, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_33, (256, ), (1, )) assert_size_stride(primals_34, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_35, (256, ), (1, )) assert_size_stride(primals_36, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_37, (256, ), (1, )) assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_39, (256, ), (1, )) assert_size_stride(primals_40, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_41, (256, ), (1, )) assert_size_stride(primals_42, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_43, (256, ), (1, )) assert_size_stride(primals_44, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_45, (256, ), (1, )) assert_size_stride(primals_46, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_47, (512, ), (1, )) assert_size_stride(primals_48, (512, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_49, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_50, (128, ), (1, )) assert_size_stride(primals_51, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_52, (128, ), (1, )) assert_size_stride(primals_53, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_54, (256, ), (1, )) assert_size_stride(primals_55, (256, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_56, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_57, (64, ), (1, )) assert_size_stride(primals_58, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_59, (64, ), (1, )) assert_size_stride(primals_60, (3, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_61, (3, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf3 = buf2; 
del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf5, primals_7, 524288, grid=grid(524288), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf7, primals_9, 524288, grid=grid(524288), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf9 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_3, buf8, primals_11, buf9, 98304, grid=grid(98304), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [conv2d_5, x_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf11, primals_13, 1048576, grid=grid(1048576), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [conv2d_6, x_7], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf13, primals_15, 1048576, grid=grid(1048576), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [conv2d_7, x_8], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf15, primals_17, 1048576, grid=grid(1048576), 
stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [conv2d_8, x_9], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf17, primals_19, 1048576, grid=grid(1048576), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf17, primals_20, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 256, 32, 32), (262144, 1024, 32, 1)) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] triton_poi_fused_convolution_3.run(buf19, primals_21, 1048576, grid=grid(1048576), stream=stream0) del primals_21 # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf21, primals_23, 524288, grid=grid(524288), stream=stream0) del primals_23 # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [conv2d_11, x_12], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf23, primals_25, 524288, grid=grid(524288), stream=stream0) del primals_25 # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [conv2d_12, x_13], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf25, primals_27, 524288, grid=grid(524288), stream=stream0) del primals_27 # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf25, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [conv2d_13, x_14], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf27, primals_29, 524288, grid=grid(524288), stream=stream0) del primals_29 # Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, primals_30, stride=(2, 2), padding=(0, 0), dilation=(1, 1), 
transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 512, 16, 16), (131072, 256, 16, 1)) buf29 = buf28; del buf28 # reuse # Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution] triton_poi_fused_convolution_6.run(buf29, primals_31, 524288, grid=grid(524288), stream=stream0) del primals_31 # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 16, 16), (65536, 256, 16, 1)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf31, primals_33, 262144, grid=grid(262144), stream=stream0) del primals_33 # Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf31, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 16, 16), (65536, 256, 16, 1)) buf33 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [conv2d_16, x_17], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf33, primals_35, 262144, grid=grid(262144), stream=stream0) del primals_35 # Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution] buf34 = extern_kernels.convolution(buf33, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 256, 16, 16), (65536, 256, 16, 1)) buf35 = buf34; del buf34 # reuse # Topologically Sorted Source Nodes: [conv2d_17, x_18], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf35, primals_37, 262144, grid=grid(262144), stream=stream0) del primals_37 # Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf35, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 256, 16, 16), (65536, 256, 16, 1)) buf37 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [conv2d_18, x_19], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf37, primals_39, 262144, grid=grid(262144), stream=stream0) del primals_39 # Topologically Sorted Source Nodes: [conv2d_19], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 256, 16, 16), (65536, 256, 16, 1)) buf39 = buf38; del buf38 # reuse # Topologically Sorted Source Nodes: [conv2d_19, x_20], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf39, primals_41, 262144, grid=grid(262144), stream=stream0) del primals_41 # Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution] buf40 = extern_kernels.convolution(buf39, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 256, 16, 16), (65536, 256, 16, 1)) buf41 = buf40; del buf40 # reuse # Topologically Sorted Source Nodes: [conv2d_20, x_21], Original 
ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf41, primals_43, 262144, grid=grid(262144), stream=stream0) del primals_43 # Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf41, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 256, 16, 16), (65536, 256, 16, 1)) buf43 = buf42; del buf42 # reuse # Topologically Sorted Source Nodes: [conv2d_21, x_22], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf43, primals_45, 262144, grid=grid(262144), stream=stream0) del primals_45 # Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution] buf44 = extern_kernels.convolution(buf43, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 512, 16, 16), (131072, 256, 16, 1)) buf45 = buf44; del buf44 # reuse # Topologically Sorted Source Nodes: [conv2d_22, x_23], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_9.run(buf45, primals_47, 524288, grid=grid(524288), stream=stream0) del primals_47 # Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution] buf46 = extern_kernels.convolution(buf45, primals_48, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf47 = buf46; del buf46 # reuse # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add] triton_poi_fused_add_10.run(buf47, buf27, 524288, grid=grid(524288), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_23], Original ATen: [aten.convolution] buf48 = extern_kernels.convolution(buf47, primals_49, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf49 = buf48; del buf48 # reuse # Topologically Sorted Source Nodes: [conv2d_23, x_26], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf49, primals_50, 524288, grid=grid(524288), stream=stream0) del primals_50 # Topologically Sorted Source Nodes: [conv2d_24], Original ATen: [aten.convolution] buf50 = extern_kernels.convolution(buf49, primals_51, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf51 = buf50; del buf50 # reuse # Topologically Sorted Source Nodes: [conv2d_24, x_27], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_5.run(buf51, primals_52, 524288, grid=grid(524288), stream=stream0) del primals_52 # Topologically Sorted Source Nodes: [conv2d_25], Original ATen: [aten.convolution] buf52 = extern_kernels.convolution(buf51, primals_53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 256, 32, 32), (262144, 1024, 32, 1)) buf53 = buf52; del buf52 # reuse # Topologically Sorted Source Nodes: [conv2d_25, x_28], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_11.run(buf53, primals_54, 1048576, grid=grid(1048576), stream=stream0) del primals_54 # Topologically Sorted Source Nodes: [x_29], Original ATen: 
[aten.convolution] buf54 = extern_kernels.convolution(buf53, primals_55, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf55 = buf54; del buf54 # reuse # Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.add] triton_poi_fused_add_12.run(buf55, buf17, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_26], Original ATen: [aten.convolution] buf56 = extern_kernels.convolution(buf55, primals_56, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf57 = buf56; del buf56 # reuse # Topologically Sorted Source Nodes: [conv2d_26, x_31], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf57, primals_57, 1048576, grid=grid(1048576), stream=stream0) del primals_57 # Topologically Sorted Source Nodes: [conv2d_27], Original ATen: [aten.convolution] buf58 = extern_kernels.convolution(buf57, primals_58, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf58, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf59 = buf58; del buf58 # reuse # Topologically Sorted Source Nodes: [conv2d_27, x_32], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf59, primals_59, 1048576, grid=grid(1048576), stream=stream0) del primals_59 # Topologically Sorted Source Nodes: [x_33], Original ATen: [aten.convolution] buf60 = extern_kernels.convolution(buf59, primals_60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf60, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf61 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) buf62 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_33, y, clamp], Original ATen: [aten.convolution, aten.add, aten.clamp, aten.ge, aten.le, aten.logical_and] triton_poi_fused_add_clamp_convolution_ge_le_logical_and_13.run(primals_3, buf60, primals_61, buf61, buf62, 49152, grid=grid(49152), stream=stream0) del buf60 del primals_61 buf63 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_4, x_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_14.run(buf8, primals_11, buf63, 49152, grid=grid(49152), stream=stream0) del buf8 del primals_11 return (buf61, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_49, primals_51, primals_53, primals_55, primals_56, primals_58, primals_60, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51, buf53, buf55, buf57, buf59, buf62, buf63, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', 
dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((3, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((64, 6, 3, 3), (54, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((256, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((512, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((256, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) 
primals_42 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_48 = rand_strided((512, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_49 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_50 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_51 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_52 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_53 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_54 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_55 = rand_strided((256, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_56 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_57 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_58 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_59 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_60 = rand_strided((3, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_61 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
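# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the generated module above: every
# triton_poi_fused_convolution_relu_* kernel in this file follows the same
# pattern -- the convolution itself runs through extern_kernels.convolution,
# and a pointwise Triton kernel then adds the per-channel bias and applies
# ReLU in place. The standalone kernel below reproduces that pointwise step
# for a contiguous NCHW float32 tensor and checks it against eager PyTorch.
# The names `bias_relu_kernel` and `fused_bias_relu` are hypothetical and
# exist only for this sketch.
# ---------------------------------------------------------------------------
import torch
import triton
import triton.language as tl


@triton.jit
def bias_relu_kernel(x_ptr, bias_ptr, n_elements, hw, channels,
                     BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n_elements
    # Recover the channel index from the flat NCHW offset, mirroring the
    # `xindex // 4096 % C` arithmetic in the generated kernels.
    ch = offs // hw % channels
    x = tl.load(x_ptr + offs, mask=mask)
    b = tl.load(bias_ptr + ch, mask=mask, eviction_policy='evict_last')
    tl.store(x_ptr + offs, tl.maximum(x + b, 0.0), mask=mask)


def fused_bias_relu(x, bias):
    """Adds a per-channel bias and applies ReLU in place on a CUDA tensor."""
    assert x.is_cuda and x.is_contiguous()
    n, c, h, w = x.shape
    numel = x.numel()
    bias_relu_kernel[(triton.cdiv(numel, 1024),)](x, bias, numel, h * w, c,
                                                  BLOCK=1024)
    return x


if __name__ == '__main__' and torch.cuda.is_available():
    t = torch.randn(4, 32, 64, 64, device='cuda')
    bias = torch.randn(32, device='cuda')
    ref = torch.relu(t + bias.view(1, -1, 1, 1))
    torch.testing.assert_close(fused_bias_relu(t.clone(), bias), ref)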
import torch
import torch.nn as nn


class ImageCleanModel(nn.Module):
    """ImageClean Model."""

    def __init__(self):
        """Init model."""
        super(ImageCleanModel, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        # Noise-level estimation head: five plain 3x3 conv + ReLU layers.
        self.E01 = nn.Conv2d(3, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.E02 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.E03 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.E04 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.E05 = nn.Conv2d(32, 3, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        # Encoder stage 1 operates on the input concatenated with the
        # estimated noise map (3 + 3 = 6 channels).
        self.DS01_layer00 = nn.Conv2d(6, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS01_layer02 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS01_layer03 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        # Downsampling stage 2: strided 2x2 conv, then a 1x1 channel fuse.
        self.DS02 = nn.Conv2d(64, 256, kernel_size=[2, 2], stride=(2, 2))
        self.DS02_layer00_cf = nn.Conv2d(256, 128, kernel_size=[1, 1], stride=(1, 1))
        self.DS02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS02_layer02 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        # Downsampling stage 3.
        self.DS03 = nn.Conv2d(128, 512, kernel_size=[2, 2], stride=(2, 2))
        self.DS03_layer00_cf = nn.Conv2d(512, 256, kernel_size=[1, 1], stride=(1, 1))
        self.DS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.DS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        # Bottleneck convs before the first upsampling step.
        self.UPS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.UPS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.UPS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.UPS03_layer03 = nn.Conv2d(256, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        # Decoder stage 2: transposed-conv upsampling plus skip connection.
        self.USP02 = nn.ConvTranspose2d(512, 128, kernel_size=[2, 2], stride=(2, 2), bias=False)
        self.US02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.US02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.US02_layer02 = nn.Conv2d(128, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        # Decoder stage 1.
        self.USP01 = nn.ConvTranspose2d(256, 64, kernel_size=[2, 2], stride=(2, 2), bias=False)
        self.US01_layer00 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.US01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))
        self.US01_layer02 = nn.Conv2d(64, 3, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1))

    def forward(self, input):
        # Estimate a per-pixel noise level map from the input.
        x = self.relu(self.E01(input))
        x = self.relu(self.E02(x))
        x = self.relu(self.E03(x))
        x = self.relu(self.E04(x))
        x = self.relu(self.E05(x))
        noise_level = x
        x = torch.cat((input, noise_level), dim=1)
        # Encoder stage 1 (full resolution).
        x = self.relu(self.DS01_layer00(x))
        x = self.relu(self.DS01_layer01(x))
        x = self.relu(self.DS01_layer02(x))
        x = self.relu(self.DS01_layer03(x))
        down1_result = x
        # Encoder stage 2 (1/2 resolution).
        x = self.DS02(down1_result)
        x = self.DS02_layer00_cf(x)
        x = self.relu(self.DS02_layer00(x))
        x = self.relu(self.DS02_layer01(x))
        x = self.relu(self.DS02_layer02(x))
        down2_result = x
        # Encoder stage 3 / bottleneck (1/4 resolution).
        x = self.DS03(down2_result)
        x = self.DS03_layer00_cf(x)
        x = self.relu(self.DS03_layer00(x))
        x = self.relu(self.DS03_layer01(x))
        x = self.relu(self.DS03_layer02(x))
        x = self.relu(self.UPS03_layer00(x))
        x = self.relu(self.UPS03_layer01(x))
        x = self.relu(self.UPS03_layer02(x))
        x = self.relu(self.UPS03_layer03(x))
        # Decoder stage 2 with skip connection from encoder stage 2.
        x = self.USP02(x)
        x = torch.add(x, down2_result, alpha=1)
        del down2_result
        x = self.relu(self.US02_layer00(x))
        x = self.relu(self.US02_layer01(x))
        x = self.relu(self.US02_layer02(x))
        # Decoder stage 1 with skip connection from encoder stage 1.
        x = self.USP01(x)
        x = torch.add(x, down1_result, alpha=1)
        del down1_result
        x = self.relu(self.US01_layer00(x))
        x = self.relu(self.US01_layer01(x))
        x = self.US01_layer02(x)
        # Residual prediction: add the learned correction to the input and
        # clamp to the valid image range.
        y = torch.add(input, x, alpha=1)
        del x
        return y.clamp(0.0, 1.0)


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
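# Illustrative usage sketch (an addition, not part of the recorded source):
# run the eager model on the shapes produced by get_inputs(). The compiled
# variants in this file appear to correspond to this module traced through
# torch.compile/Inductor.
if __name__ == '__main__':
    model = ImageCleanModel().eval()
    inp, = get_inputs()
    with torch.no_grad():
        out = model(inp)
    # The trailing clamp(0.0, 1.0) pins the output to the valid image range.
    assert out.shape == inp.shape
    assert out.min().item() >= 0.0 and out.max().item() <= 1.0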
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 6 x0 = xindex % 4096 x2 = xindex // 24576 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 12288 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 6, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-3 + x1) + 12288 * x2), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (-3 + x1), tmp6, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 
tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + x0, None) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, None) @triton.jit def triton_poi_fused_add_clamp_convolution_ge_le_logical_and_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp3 
= tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 1.0 tmp8 = triton_helpers.minimum(tmp6, tmp7) tmp9 = tmp4 >= tmp5 tmp10 = tmp4 <= tmp7 tmp11 = tmp9 & tmp10 tl.store(out_ptr0 + x3, tmp8, None) tl.store(out_ptr1 + x3, tmp11, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61) = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (3, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (3,), (1,)) assert_size_stride(primals_12, (64, 6, 3, 3), (54, 9, 3, 1)) assert_size_stride(primals_13, (64,), (1,)) assert_size_stride(primals_14, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_15, (64,), (1,)) assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_17, (64,), (1,)) assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (256, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_21, (256,), (1,)) assert_size_stride(primals_22, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_23, (128,), (1,)) assert_size_stride(primals_24, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_25, (128,), (1,)) assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_27, (128,), (1,)) assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_29, (128,), (1,)) assert_size_stride(primals_30, (512, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_31, (512,), (1,)) assert_size_stride(primals_32, (256, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_33, (256,), (1,)) assert_size_stride(primals_34, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_35, (256,), (1,)) assert_size_stride(primals_36, (256, 256, 3, 3), (2304, 9, 3, 1)) 
assert_size_stride(primals_37, (256,), (1,)) assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_39, (256,), (1,)) assert_size_stride(primals_40, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_41, (256,), (1,)) assert_size_stride(primals_42, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_43, (256,), (1,)) assert_size_stride(primals_44, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_45, (256,), (1,)) assert_size_stride(primals_46, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_47, (512,), (1,)) assert_size_stride(primals_48, (512, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_49, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_50, (128,), (1,)) assert_size_stride(primals_51, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_52, (128,), (1,)) assert_size_stride(primals_53, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_54, (256,), (1,)) assert_size_stride(primals_55, (256, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_56, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_57, (64,), (1,)) assert_size_stride(primals_58, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_59, (64,), (1,)) assert_size_stride(primals_60, (3, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_61, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(524288)](buf3, primals_5, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_0[grid(524288)](buf5, primals_7, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_0[grid(524288)](buf7, primals_9, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf9 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32) triton_poi_fused_cat_1[grid(98304)](primals_3, buf8, primals_11, buf9, 98304, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf10, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_2[grid(1048576)](buf11, primals_13, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_2[grid(1048576)](buf13, primals_15, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_2[grid(1048576)](buf15, primals_17, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_2[grid(1048576)](buf17, primals_19, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf18 = extern_kernels.convolution(buf17, primals_20, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 256, 32, 32), (262144, 1024, 32, 1)) buf19 = buf18 del buf18 triton_poi_fused_convolution_3[grid(1048576)](buf19, primals_21, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_21 buf20 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_4[grid(524288)](buf21, primals_23, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_23 buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_5[grid(524288)](buf23, primals_25, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_25 buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_5[grid(524288)](buf25, primals_27, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_27 buf26 = extern_kernels.convolution(buf25, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf27 = buf26 del buf26 triton_poi_fused_convolution_relu_5[grid(524288)](buf27, primals_29, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_29 buf28 = extern_kernels.convolution(buf27, primals_30, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 512, 16, 16), (131072, 256, 16, 1)) buf29 = 
buf28 del buf28 triton_poi_fused_convolution_6[grid(524288)](buf29, primals_31, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_31 buf30 = extern_kernels.convolution(buf29, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 256, 16, 16), (65536, 256, 16, 1)) buf31 = buf30 del buf30 triton_poi_fused_convolution_7[grid(262144)](buf31, primals_33, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_33 buf32 = extern_kernels.convolution(buf31, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 256, 16, 16), (65536, 256, 16, 1)) buf33 = buf32 del buf32 triton_poi_fused_convolution_relu_8[grid(262144)](buf33, primals_35, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_35 buf34 = extern_kernels.convolution(buf33, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 256, 16, 16), (65536, 256, 16, 1)) buf35 = buf34 del buf34 triton_poi_fused_convolution_relu_8[grid(262144)](buf35, primals_37, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_37 buf36 = extern_kernels.convolution(buf35, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 256, 16, 16), (65536, 256, 16, 1)) buf37 = buf36 del buf36 triton_poi_fused_convolution_relu_8[grid(262144)](buf37, primals_39, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_39 buf38 = extern_kernels.convolution(buf37, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 256, 16, 16), (65536, 256, 16, 1)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_8[grid(262144)](buf39, primals_41, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_41 buf40 = extern_kernels.convolution(buf39, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 256, 16, 16), (65536, 256, 16, 1)) buf41 = buf40 del buf40 triton_poi_fused_convolution_relu_8[grid(262144)](buf41, primals_43, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_43 buf42 = extern_kernels.convolution(buf41, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 256, 16, 16), (65536, 256, 16, 1)) buf43 = buf42 del buf42 triton_poi_fused_convolution_relu_8[grid(262144)](buf43, primals_45, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_45 buf44 = extern_kernels.convolution(buf43, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 512, 16, 16), (131072, 256, 16, 1)) buf45 = buf44 del buf44 triton_poi_fused_convolution_relu_9[grid(524288)](buf45, primals_47, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_47 buf46 = extern_kernels.convolution(buf45, primals_48, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf47 = buf46 del buf46 triton_poi_fused_add_10[grid(524288)](buf47, buf27, 524288, XBLOCK= 512, 
num_warps=8, num_stages=1) buf48 = extern_kernels.convolution(buf47, primals_49, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf49 = buf48 del buf48 triton_poi_fused_convolution_relu_5[grid(524288)](buf49, primals_50, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_50 buf50 = extern_kernels.convolution(buf49, primals_51, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 128, 32, 32), (131072, 1024, 32, 1)) buf51 = buf50 del buf50 triton_poi_fused_convolution_relu_5[grid(524288)](buf51, primals_52, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_52 buf52 = extern_kernels.convolution(buf51, primals_53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 256, 32, 32), (262144, 1024, 32, 1)) buf53 = buf52 del buf52 triton_poi_fused_convolution_relu_11[grid(1048576)](buf53, primals_54, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_54 buf54 = extern_kernels.convolution(buf53, primals_55, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf55 = buf54 del buf54 triton_poi_fused_add_12[grid(1048576)](buf55, buf17, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) buf56 = extern_kernels.convolution(buf55, primals_56, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf57 = buf56 del buf56 triton_poi_fused_convolution_relu_2[grid(1048576)](buf57, primals_57, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_57 buf58 = extern_kernels.convolution(buf57, primals_58, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf58, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf59 = buf58 del buf58 triton_poi_fused_convolution_relu_2[grid(1048576)](buf59, primals_59, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_59 buf60 = extern_kernels.convolution(buf59, primals_60, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf60, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf61 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) buf62 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) triton_poi_fused_add_clamp_convolution_ge_le_logical_and_13[grid(49152) ](primals_3, buf60, primals_61, buf61, buf62, 49152, XBLOCK=256, num_warps=4, num_stages=1) del buf60 del primals_61 buf63 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_14[grid(49152)]( buf8, primals_11, buf63, 49152, XBLOCK=256, num_warps=4, num_stages=1) del buf8 del primals_11 return (buf61, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_49, primals_51, primals_53, primals_55, primals_56, primals_58, primals_60, buf1, buf3, buf5, 
buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51, buf53, buf55, buf57, buf59, buf62, buf63) class ImageCleanModelNew(nn.Module): """ImageClean Model.""" def __init__(self): """Init model.""" super(ImageCleanModelNew, self).__init__() self.relu = nn.ReLU(inplace=True) self.E01 = nn.Conv2d(3, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E02 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E03 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E04 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E05 = nn.Conv2d(32, 3, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer00 = nn.Conv2d(6, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer02 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer03 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS02 = nn.Conv2d(64, 256, kernel_size=[2, 2], stride=(2, 2)) self.DS02_layer00_cf = nn.Conv2d(256, 128, kernel_size=[1, 1], stride=(1, 1)) self.DS02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS02_layer02 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03 = nn.Conv2d(128, 512, kernel_size=[2, 2], stride=(2, 2)) self.DS03_layer00_cf = nn.Conv2d(512, 256, kernel_size=[1, 1], stride=(1, 1)) self.DS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.UPS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer03 = nn.Conv2d(256, 512, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.USP02 = nn.ConvTranspose2d(512, 128, kernel_size=[2, 2], stride=(2, 2), bias=False) self.US02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.US02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.US02_layer02 = nn.Conv2d(128, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.USP01 = nn.ConvTranspose2d(256, 64, kernel_size=[2, 2], stride =(2, 2), bias=False) self.US01_layer00 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.US01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.US01_layer02 = nn.Conv2d(64, 3, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) def forward(self, input_0): primals_1 = self.E01.weight primals_2 = self.E01.bias primals_4 = self.E02.weight primals_5 = self.E02.bias primals_6 = self.E03.weight primals_7 = self.E03.bias primals_8 = self.E04.weight primals_9 = self.E04.bias primals_10 = self.E05.weight primals_11 = self.E05.bias primals_12 = self.DS01_layer00.weight primals_13 = self.DS01_layer00.bias primals_14 = self.DS01_layer01.weight primals_15 = self.DS01_layer01.bias primals_16 = self.DS01_layer02.weight 
primals_17 = self.DS01_layer02.bias primals_18 = self.DS01_layer03.weight primals_19 = self.DS01_layer03.bias primals_20 = self.DS02.weight primals_21 = self.DS02.bias primals_22 = self.DS02_layer00_cf.weight primals_23 = self.DS02_layer00_cf.bias primals_24 = self.DS02_layer00.weight primals_25 = self.DS02_layer00.bias primals_26 = self.DS02_layer01.weight primals_27 = self.DS02_layer01.bias primals_28 = self.DS02_layer02.weight primals_29 = self.DS02_layer02.bias primals_30 = self.DS03.weight primals_31 = self.DS03.bias primals_32 = self.DS03_layer00_cf.weight primals_33 = self.DS03_layer00_cf.bias primals_34 = self.DS03_layer00.weight primals_35 = self.DS03_layer00.bias primals_36 = self.DS03_layer01.weight primals_37 = self.DS03_layer01.bias primals_38 = self.DS03_layer02.weight primals_39 = self.DS03_layer02.bias primals_40 = self.UPS03_layer00.weight primals_41 = self.UPS03_layer00.bias primals_42 = self.UPS03_layer01.weight primals_43 = self.UPS03_layer01.bias primals_44 = self.UPS03_layer02.weight primals_45 = self.UPS03_layer02.bias primals_46 = self.UPS03_layer03.weight primals_47 = self.UPS03_layer03.bias primals_48 = self.USP02.weight primals_49 = self.US02_layer00.weight primals_50 = self.US02_layer00.bias primals_51 = self.US02_layer01.weight primals_52 = self.US02_layer01.bias primals_53 = self.US02_layer02.weight primals_54 = self.US02_layer02.bias primals_55 = self.USP01.weight primals_56 = self.US01_layer00.weight primals_57 = self.US01_layer00.bias primals_58 = self.US01_layer01.weight primals_59 = self.US01_layer01.bias primals_60 = self.US01_layer02.weight primals_61 = self.US01_layer02.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61]) return output[0]
delldu/ImageClean
ImageCleanModel
false
1859
[ "MIT" ]
0
ffa5b180d36afb3840c6b36c08a767c520068498
https://github.com/delldu/ImageClean/tree/ffa5b180d36afb3840c6b36c08a767c520068498
import torch import torch.nn as nn class Model(nn.Module): """ImageClean Model.""" def __init__(self): """Init model.""" super().__init__() self.relu = nn.ReLU(inplace=True) self.E01 = nn.Conv2d(3, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E02 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E03 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E04 = nn.Conv2d(32, 32, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.E05 = nn.Conv2d(32, 3, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer00 = nn.Conv2d(6, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer02 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS01_layer03 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.DS02 = nn.Conv2d(64, 256, kernel_size=[2, 2], stride=(2, 2)) self.DS02_layer00_cf = nn.Conv2d(256, 128, kernel_size=[1, 1], stride=(1, 1)) self.DS02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS02_layer02 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03 = nn.Conv2d(128, 512, kernel_size=[2, 2], stride=(2, 2)) self.DS03_layer00_cf = nn.Conv2d(512, 256, kernel_size=[1, 1], stride=(1, 1)) self.DS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.DS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.UPS03_layer00 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer01 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer02 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.UPS03_layer03 = nn.Conv2d(256, 512, kernel_size=[3, 3], stride =(1, 1), padding=(1, 1)) self.USP02 = nn.ConvTranspose2d(512, 128, kernel_size=[2, 2], stride=(2, 2), bias=False) self.US02_layer00 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.US02_layer01 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.US02_layer02 = nn.Conv2d(128, 256, kernel_size=[3, 3], stride= (1, 1), padding=(1, 1)) self.USP01 = nn.ConvTranspose2d(256, 64, kernel_size=[2, 2], stride =(2, 2), bias=False) self.US01_layer00 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.US01_layer01 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) self.US01_layer02 = nn.Conv2d(64, 3, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1)) def forward(self, input): x = self.relu(self.E01(input)) x = self.relu(self.E02(x)) x = self.relu(self.E03(x)) x = self.relu(self.E04(x)) x = self.relu(self.E05(x)) noise_level = x x = torch.cat((input, noise_level), dim=1) x = self.relu(self.DS01_layer00(x)) x = self.relu(self.DS01_layer01(x)) x = self.relu(self.DS01_layer02(x)) x = self.relu(self.DS01_layer03(x)) down1_result = x x = self.DS02(d # ... truncated (>4000 chars) for memory efficiency
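# A minimal smoke-test sketch for this record's compiled wrapper, assuming a
# CUDA device and that ImageCleanModelNew from the triton_code above is in
# scope. The (4, 3, 64, 64) input shape matches the buffer shapes asserted in
# call(); the fused add/clamp kernel suggests the output is range-limited
# (commonly to [0, 1] in denoisers), so the range is printed, not asserted.
import torch

if torch.cuda.is_available():
    model = ImageCleanModelNew().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    with torch.no_grad():
        y = model(x)
    assert y.shape == (4, 3, 64, 64)
    print(float(y.min()), float(y.max()))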
NeuralGasEnergy
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/gm/cgmv3kushujtrxmrvjkj5mflg3mhmwtkce5yytcqzjlqzishwtje.py # Topologically Sorted Source Nodes: [order], Original ATen: [aten.sort] # Source node to ATen node mapping: # order => sort # Graph fragment: # %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%arg0_1, 1), kwargs = {}) triton_per_fused_sort_0 = async_compile.triton('triton_per_fused_sort_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i16', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sort_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 
+ (x0 + (16*r2) + (64*x1)), xmask, other=0.0) tmp1 = r2 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5, tmp6, = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=False) tl.store(out_ptr0 + (x0 + (16*r2) + (64*x1)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/3z/c3zc44ixvjz4tqf4ak7iarf4kftsnhod45dnadnjdnnvv446uewn.py # Topologically Sorted Source Nodes: [order], Original ATen: [aten.sort] # Source node to ATen node mapping: # order => getitem_1 # Graph fragment: # %getitem_1 : [num_users=2] = call_function[target=operator.getitem](args = (%sort, 1), kwargs = {}) triton_poi_fused_sort_1 = async_compile.triton('triton_poi_fused_sort_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*i16', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sort_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sort_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0.to(tl.int64) tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/qs/cqsap5scohcpr3uawf32gosg725ab4m2ybzogoyuiqhmbrmp2qqs.py # Topologically Sorted Source Nodes: [ranks], Original ATen: [aten.sort] # Source node to ATen node mapping: # ranks => sort_1 # Graph fragment: # %sort_1 : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%getitem_1, 1), kwargs = {}) triton_per_fused_sort_2 = async_compile.triton('triton_per_fused_sort_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*i16', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, 
cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sort_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_sort_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0) tmp1 = r2 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5, tmp6, = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=False) tl.store(out_ptr0 + (x0 + (16*r2) + (64*x1)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/wa/cwarus5mfuv46rsvmqw6mwohxacyhcgtkbiajhajd6jii3kb3a2n.py # Topologically Sorted Source Nodes: [ranks, neg, truediv, exp, mul, cost], Original ATen: [aten.sort, aten.neg, aten.div, aten.exp, aten.mul, aten.sum] # Source node to ATen node mapping: # cost => sum_1 # exp => exp # mul => mul # neg => neg # ranks => sort_1 # truediv => div # Graph fragment: # %sort_1 : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%getitem_1, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%getitem_3,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, 4), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %arg0_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) triton_per_fused_div_exp_mul_neg_sort_sum_3 = async_compile.triton('triton_per_fused_div_exp_mul_neg_sort_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*i16', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_exp_mul_neg_sort_sum_3', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_exp_mul_neg_sort_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp7 = tl.load(in_ptr1 + (r0), None) tmp1 = tmp0.to(tl.int64) tmp2 = -tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = 0.25 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 * tmp7 tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16) # Topologically Sorted Source Nodes: [order], Original ATen: [aten.sort] stream0 = get_raw_stream(0) triton_per_fused_sort_0.run(arg0_1, buf1, 64, 4, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) # Topologically Sorted Source Nodes: [order], Original ATen: [aten.sort] triton_poi_fused_sort_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf4 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [ranks], Original ATen: [aten.sort] triton_per_fused_sort_2.run(buf2, buf4, 64, 4, grid=grid(64), stream=stream0) buf5 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [ranks, neg, truediv, exp, mul, cost], Original ATen: [aten.sort, aten.neg, aten.div, aten.exp, aten.mul, aten.sum] triton_per_fused_div_exp_mul_neg_sort_sum_3.run(buf4, arg0_1, buf5, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del buf4 return (buf5, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class NeuralGasEnergy(torch.nn.Module): def __init__(self, lm): super().__init__() self.lm = lm def forward(self, d): order = torch.argsort(d, dim=1) ranks = torch.argsort(order, dim=1) cost = torch.sum(self._nghood_fn(ranks, self.lm) * d) return cost, order def extra_repr(self): return f'lambda: {self.lm}' @staticmethod def _nghood_fn(rankings, lm): return torch.exp(-rankings / lm) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'lm': 4}]
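# A small worked check of the neural-gas energy above, assuming the
# NeuralGasEnergy class from this record is in scope: the cost is recomputed
# by hand from double-argsort ranks and compared against the module output.
import torch

ng = NeuralGasEnergy(lm=4)
d = torch.rand(4, 4, 4, 4)
cost, order = ng(d)
# ranks[i] is the neighborhood rank of entry i along dim 1 (0 = winner).
ranks = torch.argsort(torch.argsort(d, dim=1), dim=1)
expected = torch.sum(torch.exp(-ranks / 4) * d)
assert torch.allclose(cost, expected)
assert order.shape == d.shape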
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_sort_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0) tmp1 = r2 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) _tmp5, tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=False) tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp6, xmask) @triton.jit def triton_poi_fused_sort_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0.to(tl.int64) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_per_fused_sort_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0) tmp1 = r2 tmp2 = tmp1.to(tl.int16) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) _tmp5, tmp6 = triton_helpers.sort_with_index(tmp3, tmp4, None, 1, stable=False, descending=False) tl.store(out_ptr0 + (x0 + 16 * r2 + 64 * x1), tmp6, xmask) @triton.jit def triton_per_fused_div_exp_mul_neg_sort_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp7 = tl.load(in_ptr1 + r0, None) tmp1 = tmp0.to(tl.int64) tmp2 = -tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = 0.25 tmp5 = tmp3 * tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 * tmp7 tmp9 = tl.broadcast_to(tmp8, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16) get_raw_stream(0) triton_per_fused_sort_0[grid(64)](arg0_1, buf1, 64, 4, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64) triton_poi_fused_sort_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = buf1 del buf1 triton_per_fused_sort_2[grid(64)](buf2, buf4, 64, 4, XBLOCK=32, num_warps=2, num_stages=1) buf5 = empty_strided_cuda((), (), 
torch.float32) triton_per_fused_div_exp_mul_neg_sort_sum_3[grid(1)](buf4, arg0_1, buf5, 1, 256, num_warps=2, num_stages=1) del arg0_1 del buf4 return buf5, buf2 class NeuralGasEnergyNew(torch.nn.Module): def __init__(self, lm): super().__init__() self.lm = lm def extra_repr(self): return f'lambda: {self.lm}' @staticmethod def _nghood_fn(rankings, lm): return torch.exp(-rankings / lm) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0], output[1]
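# A hedged parity-check sketch between NeuralGasEnergyNew above and the eager
# NeuralGasEnergy from the previous record, assuming both classes are in
# scope. call() launches Triton kernels, so a CUDA device is required, and
# lm=4 is baked into the compiled kernel as the 0.25 constant, so the
# comparison only holds for that value. Ties in the non-stable sort are
# vanishingly unlikely with random float inputs.
import torch

if torch.cuda.is_available():
    d = torch.rand(4, 4, 4, 4, device='cuda')
    cost_new, order_new = NeuralGasEnergyNew(lm=4)(d)
    cost_ref, order_ref = NeuralGasEnergy(lm=4)(d)
    assert torch.allclose(cost_new, cost_ref)
    assert torch.equal(order_new, order_ref)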
dmoebius-dm/prototorch
NeuralGasEnergy
false
1860
[ "MIT" ]
0
088429a16a820f31367bb7b780dce0e368633fb2
https://github.com/dmoebius-dm/prototorch/tree/088429a16a820f31367bb7b780dce0e368633fb2
import torch class Model(torch.nn.Module): def __init__(self, lm): super().__init__() self.lm = lm def forward(self, d): order = torch.argsort(d, dim=1) ranks = torch.argsort(order, dim=1) cost = torch.sum(self._nghood_fn(ranks, self.lm) * d) return cost, order def extra_repr(self): return f'lambda: {self.lm}' @staticmethod def _nghood_fn(rankings, lm): return torch.exp(-rankings / lm) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
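# A minimal numeric illustration of the static _nghood_fn above, assuming the
# Model class defined just above: the winner (rank 0) gets weight 1.0 and the
# weight decays as exp(-rank / lm).
import torch

w = Model._nghood_fn(torch.arange(3.0), lm=4)
# -> tensor([1.0000, 0.7788, 0.6065]), up to rounding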
PatchEmbed
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/zt/cztkrjzjfnkenqtkvtpoo63d7nucri4mdum3kkn2uzy44lvpt2ft.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [4, 4, 4], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1572864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 96 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = 
tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_2, (96, 1, 4, 4, 4), (64, 64, 16, 4, 1)) assert_size_stride(primals_3, (96, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(4, 4, 4), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 96, 16, 16, 16), (393216, 4096, 256, 16, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 1572864, grid=grid(1572864), stream=stream0) del primals_3 return (buf1, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((96, 1, 4, 4, 4), (64, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
# trunc_normal_ was referenced below but never imported; torch.nn.init
# provides an implementation (the original repo may import it from timm).
from torch.nn.init import trunc_normal_


class PatchEmbed(nn.Module):
    """ Image to Patch Embedding

    Args:
        patch_size (tuple[int]): Patch token size. Default: (4, 4, 4).
        in_chans (int): Number of input image channels. Default: 1.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, patch_size=(4, 4, 4), in_chans=1, embed_dim=96,
                 norm_layer=None, use_spectral_aggregation='None'):
        super().__init__()
        self._patch_size = patch_size
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size,
                              stride=patch_size)
        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None
        self.use_spectral_aggregation = use_spectral_aggregation
        if self.use_spectral_aggregation == 'token':
            self.spectral_aggregation_token = nn.Parameter(
                data=torch.empty(embed_dim), requires_grad=True)
            trunc_normal_(self.spectral_aggregation_token, std=0.02)

    def forward(self, x):
        """Forward function."""
        if self.use_spectral_aggregation != 'None':
            x = F.instance_norm(x)
        x = torch.unsqueeze(x, 1)
        _, _, S, H, W = x.size()
        if W % self._patch_size[2] != 0:
            x = F.pad(x, [0, self._patch_size[2] - W % self._patch_size[2]])
        if H % self._patch_size[1] != 0:
            x = F.pad(x, [0, 0, 0,
                self._patch_size[1] - H % self._patch_size[1]])
        if S % self._patch_size[0] != 0:
            x = F.pad(x, [0, 0, 0, 0, 0,
                self._patch_size[0] - S % self._patch_size[0]])
        x = self.proj(x)
        if self.use_spectral_aggregation == 'token':
            _b, _c, _s, _h, _w = x.shape
            token = self.spectral_aggregation_token.view(1, -1, 1, 1, 1
                ).repeat(_b, 1, 1, _h, _w)
            x = torch.cat((token, x), dim=2)
        if self.norm is not None:
            Ws, Wh, Ww = x.size(2), x.size(3), x.size(4)
            x = x.flatten(2).transpose(1, 2)
            x = self.norm(x)
            x = x.transpose(1, 2).view(-1, self.embed_dim, Ws, Wh, Ww)
        return x


def get_inputs():
    return [torch.rand([4, 1, 64, 64, 64])]


def get_init_inputs():
    return [[], {}]
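# A minimal shape sketch for the patch projection above, consistent with the
# asserts in the compiled call(): the stride-4 Conv3d turns a
# (4, 1, 64, 64, 64) volume into (4, 96, 16, 16, 16) patch embeddings.
import torch
import torch.nn as nn

proj = nn.Conv3d(1, 96, kernel_size=(4, 4, 4), stride=(4, 4, 4))
x = torch.rand(4, 1, 64, 64, 64)
assert proj(x).shape == (4, 96, 16, 16, 16)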
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch._C
import torch.serialization
# trunc_normal_ was referenced below but never imported; torch.nn.init
# provides an implementation (the original repo may import it from timm).
from torch.nn.init import trunc_normal_
assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Adds the per-channel bias in place after the extern 3-D convolution.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 96
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1, 64, 64, 64), (262144, 262144,
        4096, 64, 1))
    assert_size_stride(primals_2, (96, 1, 4, 4, 4), (64, 64, 16, 4, 1))
    assert_size_stride(primals_3, (96,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(4,
            4, 4), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 96, 16, 16, 16), (393216, 4096, 256,
            16, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(1572864)](buf1, primals_3,
            1572864, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_3
    return buf1, primals_1, primals_2


class PatchEmbedNew(nn.Module):
    """ Image to Patch Embedding

    Args:
        patch_size (tuple[int]): Patch token size. Default: (4, 4, 4).
        in_chans (int): Number of input image channels. Default: 1.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, patch_size=(4, 4, 4), in_chans=1, embed_dim=96,
                 norm_layer=None, use_spectral_aggregation='None'):
        super().__init__()
        self._patch_size = patch_size
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size,
                              stride=patch_size)
        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None
        self.use_spectral_aggregation = use_spectral_aggregation
        if self.use_spectral_aggregation == 'token':
            self.spectral_aggregation_token = nn.Parameter(
                data=torch.empty(embed_dim), requires_grad=True)
            trunc_normal_(self.spectral_aggregation_token, std=0.02)

    def forward(self, input_0):
        primals_2 = self.proj.weight
        primals_3 = self.proj.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
dkswxd/Swin-Transformer-Semantic-Segmentation
PatchEmbed
false
1861
[ "Apache-2.0" ]
0
6af19736e5492a01d8952d4ee86a6d59b21c2ae1
https://github.com/dkswxd/Swin-Transformer-Semantic-Segmentation/tree/6af19736e5492a01d8952d4ee86a6d59b21c2ae1
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch._C
import torch.serialization
# trunc_normal_ was referenced below but never imported; torch.nn.init
# provides an implementation (the original repo may import it from timm).
from torch.nn.init import trunc_normal_


class Model(nn.Module):
    """ Image to Patch Embedding

    Args:
        patch_size (tuple[int]): Patch token size. Default: (4, 4, 4).
        in_chans (int): Number of input image channels. Default: 1.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, patch_size=(4, 4, 4), in_chans=1, embed_dim=96,
                 norm_layer=None, use_spectral_aggregation='None'):
        super().__init__()
        self._patch_size = patch_size
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size,
                              stride=patch_size)
        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None
        self.use_spectral_aggregation = use_spectral_aggregation
        if self.use_spectral_aggregation == 'token':
            self.spectral_aggregation_token = nn.Parameter(
                data=torch.empty(embed_dim), requires_grad=True)
            trunc_normal_(self.spectral_aggregation_token, std=0.02)

    def forward(self, x):
        """Forward function."""
        if self.use_spectral_aggregation != 'None':
            x = F.instance_norm(x)
        x = torch.unsqueeze(x, 1)
        _, _, S, H, W = x.size()
        if W % self._patch_size[2] != 0:
            x = F.pad(x, [0, self._patch_size[2] - W % self._patch_size[2]])
        if H % self._patch_size[1] != 0:
            x = F.pad(x, [0, 0, 0,
                self._patch_size[1] - H % self._patch_size[1]])
        if S % self._patch_size[0] != 0:
            x = F.pad(x, [0, 0, 0, 0, 0,
                self._patch_size[0] - S % self._patch_size[0]])
        x = self.proj(x)
        if self.use_spectral_aggregation == 'token':
            _b, _c, _s, _h, _w = x.shape
            token = self.spectral_aggregation_token.view(1, -1, 1, 1, 1
                ).repeat(_b, 1, 1, _h, _w)
            x = torch.cat((token, x), dim=2)
        if self.norm is not None:
            Ws, Wh, Ww = x.size(2), x.size(3), x.size(4)
            x = x.flatten(2).transpose(1, 2)
            x = self.norm(x)
            x = x.transpose(1, 2).view(-1, self.embed_dim, Ws, Wh, Ww)
        return x


def get_inputs():
    return [torch.rand([4, 1, 64, 64, 64])]


def get_init_inputs():
    return []
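# A minimal numeric illustration of the padding rule in forward above: each
# spatial extent is right-padded to the next multiple of the patch size before
# the projection; exact multiples pass through unchanged.
patch = 4
for extent in (64, 65, 66):
    pad = 0 if extent % patch == 0 else patch - extent % patch
    print(extent, '->', extent + pad)  # 64 -> 64, 65 -> 68, 66 -> 68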
MNISTmodel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/jo/cjofd5z4dxar3hrs2mtbo6ielcpo5qfoz5e7szgk5223qqyf4wfy.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1600 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 20 y1 = (yindex // 20) tmp0 = tl.load(in_ptr0 + (x2 + (64*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (20*x2) + (1280*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # 
kernel path: runs/run_shard_6/inductor_cache/et/cetk4nv4xhiobqu367vpvtmoqpch77aw3pjimoicgzr6pb3hnuhi.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 80 xnumel = 3249 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 20 y1 = (yindex // 20) tmp0 = tl.load(in_ptr0 + (x2 + (3249*y3)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + (20*x2) + (64980*y1)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/jz/cjz2kuudiauwo7xsgvjrtx66xilfjlrngfx2mc7i5brazzi7rkbu.py # Topologically Sorted Source Nodes: [max_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.relu] # Source node to ATen node mapping: # max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1 # x => relu # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [1, 1], [1, 1], [0, 0], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_2 = 
async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 259920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int8) tmp2 = tl.full([1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp0) tl.store(out_ptr0 + (x0), tmp1, xmask) tl.store(out_ptr1 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/lz/clzsziwmggw3irwt7rniv5j34ozeegxom3ittdyk3itcavxvkjyl.py # Topologically Sorted Source Nodes: [conv2d_1, max_pool2d_1], Original ATen: [aten.convolution, aten.max_pool2d_with_indices] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # max_pool2d_1 => getitem_3 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_convolution_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_convolution_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_max_pool2d_with_indices_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_max_pool2d_with_indices_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 800000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 80 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int8) tl.store(in_out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tl/ctlwfikzjqj2tzfxsbahkbkyzhmgtiswa62f2wsdrgl4clqcmgan.py # Topologically Sorted Source Nodes: [max_pool2d_1, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1 # x_1 => relu_1 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [1, 1], [1, 1], [0, 0], [1, 1], False), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 320 xnumel = 2500 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 80 y1 = (yindex // 80) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (80*x2) + (200000*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + (x2 + (2528*y3)), tmp2, xmask & ymask) tl.store(out_ptr1 + (y0 + (80*x2) + (200000*y1)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/cl/cclx7eadx3isekm5oexkytozpryp2h5ttbwir6frhvtzv346kjbu.py # Topologically Sorted Source Nodes: [max_pool2d_1, x_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.view] # Source node to ATen node mapping: # max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1 # x_1 => relu_1 # x_2 => view # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [1, 1], [1, 1], [0, 0], [1, 1], False), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [4, -1]), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_view_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_view_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_view_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_view_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 800000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 200000 x1 = (xindex // 200000) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2528*(x0 // 2500)) + (202240*x1) + (x0 % 2500)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/vt/cvt6synwfekvpysrm7reg7gv4rzuxzdlida6fy4emfeue3qp6o2b.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_2 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/wa/cwavavqjyrtrr2nei6l6gsooekwutdp24qjvue3t265bbk3lgpr7.py # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_6 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_relu_threshold_backward_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor 
from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (20, 1, 8, 8), (64, 64, 8, 1)) assert_size_stride(primals_2, (20, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (80, 20, 8, 8), (1280, 64, 8, 1)) assert_size_stride(primals_5, (80, ), (1, )) assert_size_stride(primals_6, (500, 200000), (200000, 1)) assert_size_stride(primals_7, (500, ), (1, )) assert_size_stride(primals_8, (8, 500), (500, 1)) assert_size_stride(primals_9, (8, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((80, 20, 8, 8), (1280, 1, 160, 20), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_4, buf0, 1600, 64, grid=grid(1600, 64), stream=stream0) del primals_4 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 20, 57, 57), (64980, 3249, 57, 1)) buf2 = empty_strided_cuda((4, 20, 57, 57), (64980, 1, 1140, 20), torch.float32) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf1, primals_2, buf2, 80, 3249, grid=grid(80, 3249), stream=stream0) del primals_2 buf3 = empty_strided_cuda((4, 20, 57, 57), (64980, 1, 1140, 
20), torch.int8) buf4 = reinterpret_tensor(buf1, (4, 20, 57, 57), (64980, 1, 1140, 20), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [max_pool2d, x], Original ATen: [aten.max_pool2d_with_indices, aten.relu] triton_poi_fused_max_pool2d_with_indices_relu_2.run(buf2, buf3, buf4, 259920, grid=grid(259920), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 80, 50, 50), (200000, 1, 4000, 80)) buf6 = buf5; del buf5 # reuse buf7 = empty_strided_cuda((4, 80, 50, 50), (200000, 1, 4000, 80), torch.int8) # Topologically Sorted Source Nodes: [conv2d_1, max_pool2d_1], Original ATen: [aten.convolution, aten.max_pool2d_with_indices] triton_poi_fused_convolution_max_pool2d_with_indices_3.run(buf6, primals_5, buf7, 800000, grid=grid(800000), stream=stream0) del primals_5 buf8 = empty_strided_cuda((4, 80, 50, 50), (202240, 2528, 50, 1), torch.float32) buf15 = empty_strided_cuda((4, 80, 50, 50), (200000, 1, 4000, 80), torch.bool) # Topologically Sorted Source Nodes: [max_pool2d_1, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4.run(buf6, buf8, buf15, 320, 2500, grid=grid(320, 2500), stream=stream0) buf9 = empty_strided_cuda((4, 200000), (200000, 1), torch.float32) # Topologically Sorted Source Nodes: [max_pool2d_1, x_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.view] triton_poi_fused_max_pool2d_with_indices_relu_view_5.run(buf8, buf9, 800000, grid=grid(800000), stream=stream0) del buf8 buf10 = empty_strided_cuda((4, 500), (500, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf9, reinterpret_tensor(primals_6, (200000, 500), (1, 200000), 0), out=buf10) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_6.run(buf11, primals_7, 2000, grid=grid(2000), stream=stream0) del primals_7 buf12 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf11, reinterpret_tensor(primals_8, (500, 8), (1, 500), 0), out=buf12) buf13 = buf12; del buf12 # reuse buf14 = empty_strided_cuda((4, 8), (8, 1), torch.bool) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_7.run(buf13, primals_9, buf14, 32, grid=grid(32), stream=stream0) del primals_9 return (buf13, primals_1, primals_3, buf0, buf2, buf3, buf4, buf6, buf7, buf9, buf11, buf14, primals_8, primals_6, buf15, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((20, 1, 8, 8), (64, 64, 8, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((80, 20, 8, 8), (1280, 64, 8, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((80, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((500, 200000), (200000, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((500, ), (1, 
), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((8, 500), (500, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
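For orientation, here is a minimal eager-mode sketch of the shape flow that the compiled call() above implements; this is an illustration only, with the sizes taken directly from the assert_size_stride checks (batch 4, one 64x64 channel, kernel size 8):

import torch

x = torch.zeros(4, 1, 64, 64)
x = torch.nn.functional.conv2d(x, torch.zeros(20, 1, 8, 8))    # 64 - 8 + 1 = 57 -> (4, 20, 57, 57)
x = torch.nn.functional.max_pool2d(x, 1)                       # 1x1 pooling leaves the shape unchanged
x = torch.nn.functional.conv2d(x, torch.zeros(80, 20, 8, 8))   # 57 - 8 + 1 = 50 -> (4, 80, 50, 50)
x = x.view(4, -1)                                              # 80 * 50 * 50 = 200000 features
x = x @ torch.zeros(200000, 500)                               # fc1 -> (4, 500)
x = x @ torch.zeros(500, 8)                                    # fc2 head -> (4, 8)
assert x.shape == (4, 8)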
import torch import torch.nn.functional as F import torch.nn as nn class Evidential_layer(nn.Module): def __init__(self, in_dim, num_classes): super(Evidential_layer, self).__init__() self.num_classes = num_classes self.fc1 = nn.Linear(in_dim, 2 * self.num_classes) self.relu = torch.nn.ReLU() def forward(self, x): x = self.fc1(x) return self.relu(x) class MNISTmodel(nn.Module): def __init__(self, num_classes, edl, dropout=True): super(MNISTmodel, self).__init__() self.use_dropout = dropout k, m = 8, 80 km = (64 - 2 * (k - 1)) ** 2 * m self.num_classes = num_classes self.conv1 = nn.Conv2d(1, 20, kernel_size=k) self.conv2 = nn.Conv2d(20, m, kernel_size=k) self.fc1 = nn.Linear(km, 500) if edl: self.fc2 = Evidential_layer(500, self.num_classes) else: self.fc2 = nn.Linear(500, self.num_classes) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 1)) x = F.relu(F.max_pool2d(self.conv2(x), 1)) x = x.view(x.size()[0], -1) x = F.relu(self.fc1(x)) if self.use_dropout: x = F.dropout(x, training=self.training) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {'num_classes': 4, 'edl': 4}]
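A minimal smoke test for the module above (note that get_init_inputs passes edl=4, which is truthy, so the Evidential_layer head is selected and the output carries 2 * num_classes = 8 values per sample):

import torch

model = MNISTmodel(num_classes=4, edl=True)
model.eval()  # disable dropout so the forward pass is deterministic
x, = get_inputs()
with torch.no_grad():
    out = model(x)
assert out.shape == (4, 8)  # batch of 4, Evidential_layer emits 2 * num_classes outputs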
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 1600 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 20 y1 = yindex // 20 tmp0 = tl.load(in_ptr0 + (x2 + 64 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 20 * x2 + 1280 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 80 xnumel = 3249 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 20 y1 = yindex // 20 tmp0 = tl.load(in_ptr0 + (x2 + 3249 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + 20 * x2 + 64980 * y1), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 259920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int8) tmp2 = tl.full([1], 0, tl.int32) tmp3 = triton_helpers.maximum(tmp2, tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) tl.store(out_ptr1 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_convolution_max_pool2d_with_indices_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 800000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 80 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int8) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 320 xnumel = 2500 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 80 y1 = yindex // 80 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 80 * x2 + 200000 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(out_ptr0 + (x2 + 2528 * y3), tmp2, xmask & ymask) tl.store(out_ptr1 + (y0 + 80 * x2 + 200000 * y1), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_view_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 800000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 200000 x1 = xindex // 200000 x2 = xindex tmp0 = tl.load(in_ptr0 + (2528 * (x0 // 2500) + 202240 * x1 + x0 % 2500 ), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 2000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (20, 1, 8, 8), (64, 64, 8, 1)) assert_size_stride(primals_2, (20,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (80, 20, 8, 8), (1280, 64, 8, 1)) assert_size_stride(primals_5, (80,), (1,)) assert_size_stride(primals_6, (500, 200000), (200000, 1)) assert_size_stride(primals_7, (500,), (1,)) assert_size_stride(primals_8, (8, 500), (500, 1)) assert_size_stride(primals_9, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((80, 20, 8, 8), (1280, 1, 160, 20), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(1600, 64)](primals_4, buf0, 1600, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_4 buf1 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 20, 57, 57), (64980, 3249, 57, 1)) buf2 = empty_strided_cuda((4, 20, 57, 57), (64980, 1, 1140, 20), torch.float32) triton_poi_fused_convolution_1[grid(80, 3249)](buf1, primals_2, buf2, 80, 3249, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf3 = empty_strided_cuda((4, 20, 57, 57), (64980, 1, 1140, 20), torch.int8) buf4 = 
reinterpret_tensor(buf1, (4, 20, 57, 57), (64980, 1, 1140, 20), 0) del buf1 triton_poi_fused_max_pool2d_with_indices_relu_2[grid(259920)](buf2, buf3, buf4, 259920, XBLOCK=1024, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 80, 50, 50), (200000, 1, 4000, 80)) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 80, 50, 50), (200000, 1, 4000, 80), torch.int8) triton_poi_fused_convolution_max_pool2d_with_indices_3[grid(800000)]( buf6, primals_5, buf7, 800000, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf8 = empty_strided_cuda((4, 80, 50, 50), (202240, 2528, 50, 1), torch.float32) buf15 = empty_strided_cuda((4, 80, 50, 50), (200000, 1, 4000, 80), torch.bool) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4[grid (320, 2500)](buf6, buf8, buf15, 320, 2500, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 200000), (200000, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_view_5[grid(800000)](buf8 , buf9, 800000, XBLOCK=1024, num_warps=4, num_stages=1) del buf8 buf10 = empty_strided_cuda((4, 500), (500, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_6, (200000, 500), (1, 200000), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_6[grid(2000)](buf11, primals_7, 2000, XBLOCK= 256, num_warps=4, num_stages=1) del primals_7 buf12 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(buf11, reinterpret_tensor(primals_8, (500, 8), (1, 500), 0), out=buf12) buf13 = buf12 del buf12 buf14 = empty_strided_cuda((4, 8), (8, 1), torch.bool) triton_poi_fused_relu_threshold_backward_7[grid(32)](buf13, primals_9, buf14, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_9 return (buf13, primals_1, primals_3, buf0, buf2, buf3, buf4, buf6, buf7, buf9, buf11, buf14, primals_8, primals_6, buf15) class Evidential_layer(nn.Module): def __init__(self, in_dim, num_classes): super(Evidential_layer, self).__init__() self.num_classes = num_classes self.fc1 = nn.Linear(in_dim, 2 * self.num_classes) self.relu = torch.nn.ReLU() def forward(self, x): x = self.fc1(x) return self.relu(x) class MNISTmodelNew(nn.Module): def __init__(self, num_classes, edl, dropout=True): super(MNISTmodelNew, self).__init__() self.use_dropout = dropout k, m = 8, 80 km = (64 - 2 * (k - 1)) ** 2 * m self.num_classes = num_classes self.conv1 = nn.Conv2d(1, 20, kernel_size=k) self.conv2 = nn.Conv2d(20, m, kernel_size=k) self.fc1 = nn.Linear(km, 500) if edl: self.fc2 = Evidential_layer(500, self.num_classes) else: self.fc2 = nn.Linear(500, self.num_classes) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.fc1.weight primals_9 = self.fc2.fc1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
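A hedged parity check between the eager module and the compiled wrapper above; this is a sketch, not part of the generated artifact. It assumes a CUDA device (call() pins everything to cuda:0), and the eager model must be in eval mode because the compiled graph contains no dropout:

import torch

eager = MNISTmodel(num_classes=4, edl=True).cuda().eval()
compiled = MNISTmodelNew(num_classes=4, edl=True).cuda().eval()
compiled.load_state_dict(eager.state_dict())  # identical parameter names, so weights transfer directly

x = torch.rand(4, 1, 64, 64, device='cuda')
torch.testing.assert_close(compiled(x), eager(x), rtol=1e-4, atol=1e-4)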
caisr-hh/DEED
MNISTmodel
false
1862
[ "MIT" ]
0
2a9edb1df31d99c1e8da177dec696d7c90c2e7de
https://github.com/caisr-hh/DEED/tree/2a9edb1df31d99c1e8da177dec696d7c90c2e7de
import torch import torch.nn.functional as F import torch.nn as nn class Evidential_layer(nn.Module): def __init__(self, in_dim, num_classes): super().__init__() self.num_classes = num_classes self.fc1 = nn.Linear(in_dim, 2 * self.num_classes) self.relu = torch.nn.ReLU() def forward(self, x): x = self.fc1(x) return self.relu(x) class Model(nn.Module): def __init__(self, num_classes, edl, dropout=True): super().__init__() self.use_dropout = dropout k, m = 8, 80 km = (64 - 2 * (k - 1)) ** 2 * m self.num_classes = num_classes self.conv1 = nn.Conv2d(1, 20, kernel_size=k) self.conv2 = nn.Conv2d(20, m, kernel_size=k) self.fc1 = nn.Linear(km, 500) if edl: self.fc2 = Evidential_layer(500, self.num_classes) else: self.fc2 = nn.Linear(500, self.num_classes) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 1)) x = F.relu(F.max_pool2d(self.conv2(x), 1)) x = x.view(x.size()[0], -1) x = F.relu(self.fc1(x)) if self.use_dropout: x = F.dropout(x, training=self.training) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [4, 4]
AngularLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/a4/ca4tmlb2icbhs4x66u6wgz7rnps47aezzfixcy2yeb7asasj4imo.py # Topologically Sorted Source Nodes: [sub, abs_1, tensor, sub_1, abs_2, sub_2, distance, mul, abs_3, diff], Original ATen: [aten.sub, aten.abs, aten.lift_fresh, aten.minimum, aten.mul, aten.sqrt] # Source node to ATen node mapping: # abs_1 => abs_1 # abs_2 => abs_2 # abs_3 => abs_3 # diff => sqrt # distance => minimum # mul => mul # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # tensor => full_default # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 6.2831854820251465), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%full_default, %abs_2), kwargs = {}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%abs_1, %sub_2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%minimum, %minimum), kwargs = {}) # %abs_3 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul,), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%abs_3,), kwargs = {}) triton_poi_fused_abs_lift_fresh_minimum_mul_sqrt_sub_0 = async_compile.triton('triton_poi_fused_abs_lift_fresh_minimum_mul_sqrt_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_lift_fresh_minimum_mul_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_lift_fresh_minimum_mul_sqrt_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tmp1 - tmp0 tmp5 = tl_math.abs(tmp4) tmp6 = 6.2831854820251465 tmp7 = tmp6 - tmp5 tmp8 = triton_helpers.minimum(tmp3, tmp7) tmp9 = tmp8 * tmp8 tmp10 = tl_math.abs(tmp9) tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, abs_1, tensor, sub_1, abs_2, sub_2, distance, mul, abs_3, diff], Original ATen: [aten.sub, aten.abs, aten.lift_fresh, aten.minimum, aten.mul, aten.sqrt] stream0 = get_raw_stream(0) triton_poi_fused_abs_lift_fresh_minimum_mul_sqrt_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn def calc_angular_difference(a1, a2): distance = torch.min(torch.abs(a1 - a2), torch.tensor(2 * math.pi) - torch.abs(a2 - a1)) diff = torch.sqrt(torch.abs(distance * distance)) return diff class AngularLoss(nn.Module): def __init__(self): super(AngularLoss, self).__init__() def forward(self, predicted, actual, mask=None): return calc_angular_difference(predicted, actual) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
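A concrete check of the wrap-around behavior (a minimal sketch): for a1 = 0 and a2 = 3π/2, the raw gap |a1 - a2| is 3π/2, but going the other way around the circle gives 2π - 3π/2 = π/2, and the loss correctly reports the shorter arc:

import math
import torch

a1 = torch.tensor([0.0])
a2 = torch.tensor([3 * math.pi / 2])
diff = calc_angular_difference(a1, a2)
assert torch.allclose(diff, torch.tensor([math.pi / 2]))  # shortest arc, not the raw gap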
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_lift_fresh_minimum_mul_sqrt_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tmp1 - tmp0 tmp5 = tl_math.abs(tmp4) tmp6 = 6.2831854820251465 tmp7 = tmp6 - tmp5 tmp8 = triton_helpers.minimum(tmp3, tmp7) tmp9 = tmp8 * tmp8 tmp10 = tl_math.abs(tmp9) tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_lift_fresh_minimum_mul_sqrt_sub_0[grid(256)]( arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, def calc_angular_difference(a1, a2): distance = torch.min(torch.abs(a1 - a2), torch.tensor(2 * math.pi) - torch.abs(a2 - a1)) diff = torch.sqrt(torch.abs(distance * distance)) return diff class AngularLossNew(nn.Module): def __init__(self): super(AngularLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
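A quick parity sketch between the eager loss and the compiled wrapper (an illustration only, assuming both classes from this record are importable; it requires a CUDA device, since call() targets cuda:0 and asserts the (4, 4, 4, 4) input layout):

import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(AngularLossNew()(a, b), AngularLoss()(a, b))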
donikv/rgn_pytorch
AngularLoss
false
1863
[ "MIT" ]
0
95f8cd36fec5655f9bfd8634fff89b06e81dc2ed
https://github.com/donikv/rgn_pytorch/tree/95f8cd36fec5655f9bfd8634fff89b06e81dc2ed
import math import torch import torch.nn as nn def calc_angular_difference(a1, a2): distance = torch.min(torch.abs(a1 - a2), torch.tensor(2 * math.pi) - torch.abs(a2 - a1)) diff = torch.sqrt(torch.abs(distance * distance)) return diff class Model(nn.Module): def __init__(self): super().__init__() def forward(self, predicted, actual, mask=None): return calc_angular_difference(predicted, actual) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
SelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/aq/caqpjuuece3s6x373jnyq7bu3iejq6wutzx3vhurepwmv4qavz2v.py # Topologically Sorted Source Nodes: [queries_2], Original ATen: [aten.div] # Source node to ATen node mapping: # queries_2 => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_10, 1.4142135623730951), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 32 x2 = (xindex // 128) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 % 8)) + (32*x2) + (128*(x1 // 8))), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/l3/cl3mi735redoj4rwv2jx3ani3cnzywllcge3lqnceydwaqjq4u4c.py # Topologically Sorted Source Nodes: [keys_2], Original ATen: [aten.div] # Source node to ATen node mapping: # keys_2 => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_9, 1.4142135623730951), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*(x2 % 8)) + (32*x1) + (128*(x2 // 8))), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/sa/csacntwu4piu2d25glgmaalubpyinnrucmyvwofq5avgc2qlbgag.py # Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # dot_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/d2/cd2tzxoj6qyacqcakrmgzt7bvwbnjpz3zietppzw2tbanup3un7q.py # Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # dot_1 => div_2, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/uo/cuoxsr75anbpejcefsbh4bf2mxvqfhckj7irhar4w5pvj4n72lyq.py # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_2 => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 8 x3 = (xindex // 128) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (32*x1) + (128*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/jt/cjtb7mgxbriyt2ad443e5zszp6avjfe6ghtgeigk6ftcjxoidhg5.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = 
async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 8 x2 = (xindex // 32) % 4 x3 = (xindex // 128) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (128*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/5a/c5a3ez5wlpfg6wikyo4pxvg7lmx4bhegiz2dmnoq7brzv32gwu6v.py # Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose] # Source node to ATen node mapping: # Graph fragment: # %permute_16 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%div, [0, 2, 1]), kwargs = {}) triton_poi_fused_transpose_6 = async_compile.triton('triton_poi_fused_transpose_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_transpose_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (128*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (32, 4), (4, 1)) assert_size_stride(primals_3, (32, 4), (4, 1)) assert_size_stride(primals_4, (32, 4), (4, 1)) assert_size_stride(primals_5, (4, 32), (32, 1)) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 32), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 32), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((32, 4, 4), (4, 128, 1), torch.float32) # Topologically Sorted Source Nodes: [queries_2], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(buf0, buf3, 512, grid=grid(512), stream=stream0) buf4 = reinterpret_tensor(buf0, (32, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [keys_2], Original ATen: [aten.div] triton_poi_fused_div_1.run(buf1, buf4, 512, grid=grid(512), stream=stream0) buf5 = reinterpret_tensor(buf1, (32, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [queries_2, dot], Original ATen: [aten.div, aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (32, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((32, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 512, grid=grid(512), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf6, buf7, 512, grid=grid(512), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 8, 4, 4), (128, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf2, buf8, 512, grid=grid(512), stream=stream0) buf9 = reinterpret_tensor(buf2, (32, 4, 4), (16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (32, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf9, buf10, 512, grid=grid(512), stream=stream0) buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (16, 32), (32, 1), 0), reinterpret_tensor(primals_5, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf11) del primals_6 buf12 = reinterpret_tensor(buf9, (32, 4, 4), (16, 1, 4), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose] triton_poi_fused_transpose_6.run(buf3, buf12, 512, grid=grid(512), stream=stream0) del buf3 return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 32), (32, 1), 0), primals_5, reinterpret_tensor(buf8, (32, 4, 4), (16, 1, 4), 0), buf12, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
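The pair of _softmax kernels above is the usual numerically stable split: triton_poi_fused__softmax_2 subtracts the per-row max before exponentiating, and triton_poi_fused__softmax_3 normalizes by the per-row sum. A minimal eager-mode equivalent of the pair, for reference only:

import torch

def stable_softmax_rows(dot: torch.Tensor) -> torch.Tensor:
    # Subtract the row max before exp (kernel 2), then divide by the row sum (kernel 3).
    shifted = dot - dot.amax(dim=-1, keepdim=True)
    e = shifted.exp()
    return e / e.sum(dim=-1, keepdim=True)

dot = torch.randn(32, 4, 4)
torch.testing.assert_close(stable_softmax_rows(dot), dot.softmax(dim=-1))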
import torch import torch.nn as nn import torch.nn.functional as F class SelfAttention(nn.Module): def __init__(self, k, heads=8): super().__init__() self.k = k self.heads = heads self.to_keys = nn.Linear(k, k * heads, bias=False) self.to_queries = nn.Linear(k, k * heads, bias=False) self.to_values = nn.Linear(k, k * heads, bias=False) self.unify_heads = nn.Linear(heads * k, k) def forward(self, x): b, t, k = x.size() h = self.heads queries = self.to_queries(x).view(b, t, h, k) keys = self.to_keys(x).view(b, t, h, k) values = self.to_values(x).view(b, t, h, k) keys = keys.transpose(1, 2).contiguous().view(b * h, t, k) queries = queries.transpose(1, 2).contiguous().view(b * h, t, k) values = values.transpose(1, 2).contiguous().view(b * h, t, k) queries = queries / k ** (1 / 4) keys = keys / k ** (1 / 4) dot = torch.bmm(queries, keys.transpose(1, 2)) dot = F.softmax(dot, dim=2) out = torch.bmm(dot, values).view(b, h, t, k) out = out.transpose(1, 2).contiguous().view(b, t, h * k) return self.unify_heads(out) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'k': 4}]
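Note the scaling trick in the forward pass above: dividing both queries and keys by k ** (1/4) gives their dot product the standard 1/sqrt(k) attention scale, which is exactly the 0.7071067811865475 = 1/sqrt(2) constant baked into the div kernels, since k = 4. A minimal shape check under the get_inputs convention (b = t = k = 4):

import torch

attn = SelfAttention(k=4, heads=8)
x, = get_inputs()            # (b, t, k) = (4, 4, 4)
out = attn(x)
assert out.shape == x.shape  # unify_heads projects h * k back down to k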
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 32 x2 = xindex // 128 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 8) + 32 * x2 + 128 * (x1 // 8) ), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x2 % 8) + 32 * x1 + 128 * (x2 // 8) ), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 8 x3 = xindex // 128 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 32 * x1 + 128 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex 
// 4 % 8 x2 = xindex // 32 % 4 x3 = xindex // 128 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 128 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_transpose_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 128 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (32, 4), (4, 1)) assert_size_stride(primals_3, (32, 4), (4, 1)) assert_size_stride(primals_4, (32, 4), (4, 1)) assert_size_stride(primals_5, (4, 32), (32, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 32), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 32), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 32), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((32, 4, 4), (4, 128, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(512)](buf0, buf3, 512, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf0, (32, 4, 4), (16, 4, 1), 0) del buf0 triton_poi_fused_div_1[grid(512)](buf1, buf4, 512, XBLOCK=128, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf1, (32, 4, 4), (16, 4, 1), 0) del buf1 extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (32, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((32, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(512)](buf5, buf6, 512, XBLOCK=128, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_3[grid(512)](buf6, buf7, 512, XBLOCK=256, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 8, 4, 4), (128, 16, 4, 1), 0) del buf6 triton_poi_fused_clone_4[grid(512)](buf2, buf8, 512, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf2, (32, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (32, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32 ) triton_poi_fused_clone_5[grid(512)](buf9, buf10, 512, XBLOCK=256, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, reinterpret_tensor(buf10, (16, 32), (32, 1), 0), reinterpret_tensor(primals_5, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf11) del primals_6 buf12 = reinterpret_tensor(buf9, (32, 4, 4), (16, 1, 4), 0) del buf9 triton_poi_fused_transpose_6[grid(512)](buf3, buf12, 512, XBLOCK= 128, num_warps=4, num_stages=1) del buf3 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 32), (32, 1), 0 ), primals_5, reinterpret_tensor(buf8, (32, 4, 4), (16, 1, 4), 0 ), buf12, buf4 class SelfAttentionNew(nn.Module): def 
__init__(self, k, heads=8): super().__init__() self.k = k self.heads = heads self.to_keys = nn.Linear(k, k * heads, bias=False) self.to_queries = nn.Linear(k, k * heads, bias=False) self.to_values = nn.Linear(k, k * heads, bias=False) self.unify_heads = nn.Linear(heads * k, k) def forward(self, input_0): primals_2 = self.to_keys.weight primals_3 = self.to_queries.weight primals_4 = self.to_values.weight primals_5 = self.unify_heads.weight primals_6 = self.unify_heads.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
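A minimal sanity check for this entry is to run the eager SelfAttention and the Inductor-generated SelfAttentionNew on shared weights and compare outputs. This is a sketch, not part of the original entry: it assumes both class definitions above are in scope and a CUDA device is available (call() pins everything to cuda:0), and the atol is an arbitrary choice.

import torch

torch.manual_seed(0)
eager = SelfAttention(k=4).cuda()
compiled = SelfAttentionNew(k=4).cuda()
compiled.load_state_dict(eager.state_dict())  # reuse the eager weights

x = torch.rand(4, 4, 4, device='cuda')        # matches get_inputs() above
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)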
dogeplusplus/sandbox
SelfAttention
false
1864
[ "MIT" ]
0
c9041c06da9454f6c3cec622abbbf918c9f13bdc
https://github.com/dogeplusplus/sandbox/tree/c9041c06da9454f6c3cec622abbbf918c9f13bdc
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, k, heads=8): super().__init__() self.k = k self.heads = heads self.to_keys = nn.Linear(k, k * heads, bias=False) self.to_queries = nn.Linear(k, k * heads, bias=False) self.to_values = nn.Linear(k, k * heads, bias=False) self.unify_heads = nn.Linear(heads * k, k) def forward(self, x): b, t, k = x.size() h = self.heads queries = self.to_queries(x).view(b, t, h, k) keys = self.to_keys(x).view(b, t, h, k) values = self.to_values(x).view(b, t, h, k) keys = keys.transpose(1, 2).contiguous().view(b * h, t, k) queries = queries.transpose(1, 2).contiguous().view(b * h, t, k) values = values.transpose(1, 2).contiguous().view(b * h, t, k) queries = queries / k ** (1 / 4) keys = keys / k ** (1 / 4) dot = torch.bmm(queries, keys.transpose(1, 2)) dot = F.softmax(dot, dim=2) out = torch.bmm(dot, values).view(b, h, t, k) out = out.transpose(1, 2).contiguous().view(b, t, h * k) return self.unify_heads(out) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4]
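One detail worth noting in the kernels above: the constant 0.7071067811865475 baked into triton_poi_fused_div_0 and triton_poi_fused_div_1 is 1 / k**(1/4) for k = 4, i.e. Inductor constant-folded the `queries / k ** (1 / 4)` and `keys / k ** (1 / 4)` lines. Scaling each operand by k**-0.25 scales the q.k dot product by 1/sqrt(k), the standard attention normalization, while keeping queries and keys at comparable magnitude. A quick numeric check:

k = 4
scale = 1 / k ** 0.25
assert abs(scale - 0.7071067811865475) < 1e-12    # the literal in the div kernels
assert abs(scale * scale - 1 / k ** 0.5) < 1e-12  # combined effect on q @ k.T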
GlobalWeightedAvgPool2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/jz/cjzoc6xisenudem73wv6jtmndx35ok7u4hfkm7ipl6rjq5za2ecw.py # Topologically Sorted Source Nodes: [m, sigmoid, m_1, sum_1], Original ATen: [aten.convolution, aten.sigmoid, aten.exp, aten.sum] # Source node to ATen node mapping: # m => convolution # m_1 => exp # sigmoid => sigmoid # sum_1 => sum_1 # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sigmoid,), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2, 3], True), kwargs = {}) triton_per_fused_convolution_exp_sigmoid_sum_0 = async_compile.triton('triton_per_fused_convolution_exp_sigmoid_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_exp_sigmoid_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_convolution_exp_sigmoid_sum_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(in_out_ptr0 + (r1 + (16*x0)), tmp3, xmask) tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/u7/cu7tuonvj2l2bybmuzk4zjpmuntxy47o26ta565v5zj47gq5o2pu.py # Topologically Sorted Source Nodes: [sigmoid, m_1, x, x_1, x_2], Original ATen: [aten.sigmoid, aten.exp, aten.div, aten.mul, aten.sum] # Source node to ATen node mapping: # m_1 => exp # sigmoid => sigmoid # x => div # x_1 => mul # x_2 => sum_2 # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sigmoid,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2, 3], True), kwargs = {}) triton_per_fused_div_exp_mul_sigmoid_sum_1 = async_compile.triton('triton_per_fused_div_exp_mul_sigmoid_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_exp_mul_sigmoid_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_exp_mul_sigmoid_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x1 = (xindex // 4) x3 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (16*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (r2 + (16*x3)), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp2 = tl_math.exp(tmp1) tmp4 = tmp2 / tmp3 tmp6 = tmp4 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [m], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [m, sigmoid, m_1, sum_1], Original ATen: [aten.convolution, aten.sigmoid, aten.exp, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_convolution_exp_sigmoid_sum_0.run(buf1, primals_3, buf2, 4, 16, grid=grid(4), stream=stream0) del primals_3 buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, m_1, x, x_1, x_2], Original ATen: [aten.sigmoid, aten.exp, aten.div, aten.mul, aten.sum] triton_per_fused_div_exp_mul_sigmoid_sum_1.run(buf1, buf2, primals_1, buf3, 16, 16, grid=grid(16), stream=stream0) return (buf3, primals_1, primals_2, buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional import torch.nn.parallel import torch.utils.data import torch.optim import torch.utils.data.distributed def import_flatten_impl(): global flatten_impl, unflatten_impl, imported_flatten_impl try: flatten_impl = apex_C.flatten unflatten_impl = apex_C.unflatten except ImportError: None flatten_impl = torch._utils._flatten_dense_tensors unflatten_impl = torch._utils._unflatten_dense_tensors imported_flatten_impl = True def flatten(bucket): if not imported_flatten_impl: import_flatten_impl() return flatten_impl(bucket) class GlobalWeightedAvgPool2d(nn.Module): """ Global Weighted Average Pooling from paper "Global Weighted Average Pooling Bridges Pixel-level Localization and Image-level Classification" """ def __init__(self, features: 'int', flatten=False): super().__init__() self.conv = nn.Conv2d(features, 1, kernel_size=1, bias=True) self.flatten = flatten def fscore(self, x): m = self.conv(x) m = m.sigmoid().exp() return m def norm(self, x: 'torch.Tensor'): return x / x.sum(dim=[2, 3], keepdim=True) def forward(self, x): input_x = x x = self.fscore(x) x = self.norm(x) x = x * input_x x = x.sum(dim=[2, 3], keepdim=not self.flatten) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional import torch.nn.parallel import torch.utils.data import torch.optim import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_convolution_exp_sigmoid_sum_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tl.store(in_out_ptr0 + (r1 + 16 * x0), tmp3, xmask) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_per_fused_div_exp_mul_sigmoid_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x1 = xindex // 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 16 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (r2 + 16 * x3), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp2 = tl_math.exp(tmp1) tmp4 = tmp2 / tmp3 tmp6 = tmp4 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) get_raw_stream(0) triton_per_fused_convolution_exp_sigmoid_sum_0[grid(4)](buf1, primals_3, buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_per_fused_div_exp_mul_sigmoid_sum_1[grid(16)](buf1, buf2, primals_1, buf3, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) return buf3, primals_1, primals_2, buf1, buf2 def import_flatten_impl(): global flatten_impl, unflatten_impl, imported_flatten_impl try: flatten_impl = apex_C.flatten unflatten_impl = apex_C.unflatten except ImportError: None flatten_impl = torch._utils._flatten_dense_tensors unflatten_impl = torch._utils._unflatten_dense_tensors imported_flatten_impl = True def 
flatten(bucket): if not imported_flatten_impl: import_flatten_impl() return flatten_impl(bucket) class GlobalWeightedAvgPool2dNew(nn.Module): """ Global Weighted Average Pooling from paper "Global Weighted Average Pooling Bridges Pixel-level Localization and Image-level Classification" """ def __init__(self, features: 'int', flatten=False): super().__init__() self.conv = nn.Conv2d(features, 1, kernel_size=1, bias=True) self.flatten = flatten def fscore(self, x): m = self.conv(x) m = m.sigmoid().exp() return m def norm(self, x: 'torch.Tensor'): return x / x.sum(dim=[2, 3], keepdim=True) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
dong03/dfdc_deepfake_challenge
GlobalWeightedAvgPool2d
false
1865
[ "MIT" ]
0
bee310d0e4f1f6c9bd8ec7c0c97a98b52667673d
https://github.com/dong03/dfdc_deepfake_challenge/tree/bee310d0e4f1f6c9bd8ec7c0c97a98b52667673d
import torch import torch.nn as nn import torch.nn.functional import torch.nn.parallel import torch.utils.data import torch.optim import torch.utils.data.distributed def import_flatten_impl(): global flatten_impl, unflatten_impl, imported_flatten_impl try: flatten_impl = apex_C.flatten unflatten_impl = apex_C.unflatten except ImportError: None flatten_impl = torch._utils._flatten_dense_tensors unflatten_impl = torch._utils._unflatten_dense_tensors imported_flatten_impl = True def flatten(bucket): if not imported_flatten_impl: import_flatten_impl() return flatten_impl(bucket) class Model(nn.Module): """ Global Weighted Average Pooling from paper "Global Weighted Average Pooling Bridges Pixel-level Localization and Image-level Classification" """ def __init__(self, features: 'int', flatten=False): super().__init__() self.conv = nn.Conv2d(features, 1, kernel_size=1, bias=True) self.flatten = flatten def fscore(self, x): m = self.conv(x) m = m.sigmoid().exp() return m def norm(self, x: 'torch.Tensor'): return x / x.sum(dim=[2, 3], keepdim=True) def forward(self, x): input_x = x x = self.fscore(x) x = self.norm(x) x = x * input_x x = x.sum(dim=[2, 3], keepdim=not self.flatten) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
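The two fused kernels in this entry implement a weighted spatial average: per-pixel scores exp(sigmoid(conv(x))) are normalized to sum to 1 over H x W, then used to weight a spatial sum of the input. A minimal eager restatement (a sketch; gwap_reference is an illustrative name, and the check assumes the GlobalWeightedAvgPool2d definition above):

import torch

def gwap_reference(x, conv):
    w = conv(x).sigmoid().exp()                   # (B, 1, H, W) pixel scores
    w = w / w.sum(dim=[2, 3], keepdim=True)       # weights sum to 1 per image
    return (w * x).sum(dim=[2, 3], keepdim=True)  # (B, C, 1, 1) weighted average

x = torch.rand(4, 4, 4, 4)
pool = GlobalWeightedAvgPool2d(features=4)        # flatten=False keeps keepdim=True
assert torch.allclose(pool(x), gwap_reference(x, pool.conv))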
BertTextPooler
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py # Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone] # Source node to ATen node mapping: # pooled_output => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/xk/cxkfjvxcrwrocrik25vel4gb2spp4jrbijo33ra4mgkw3hn2qgah.py # Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # pooled_output => add # pooled_output_1 => relu # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.mm] 
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_1.run(buf2, primals_3, buf3, 64, grid=grid(64), stream=stream0) del primals_3 return (buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch from torch import nn class BertTextPooler(nn.Module): def __init__(self, config): super(BertTextPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.bi_hidden_size) self.activation = nn.ReLU() def forward(self, hidden_states): first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, bi_hidden_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_1[grid(64)](buf2, primals_3, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3 class BertTextPoolerNew(nn.Module): def __init__(self, config): super(BertTextPoolerNew, self).__init__() self.dense = nn.Linear(config.hidden_size, config.bi_hidden_size) self.activation = nn.ReLU() def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
eaidova/lxmert
BertTextPooler
false
1866
[ "MIT" ]
0
c74616907125242112c6ee5c516b54c250168e8b
https://github.com/eaidova/lxmert/tree/c74616907125242112c6ee5c516b54c250168e8b
from _paritybench_helpers import _mock_config import torch from torch import nn class Model(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.bi_hidden_size) self.activation = nn.ReLU() def forward(self, hidden_states): first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
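Usage sketch for this entry: the pooler selects the first token along dim 1 (which triton_poi_fused_clone_0 realizes as a strided copy of 16 contiguous floats out of each 64-float batch slice) and applies Linear + ReLU. The snippet assumes the BertTextPooler definition and the _paritybench_helpers module used above are importable:

import torch
from _paritybench_helpers import _mock_config

pooler = BertTextPooler(_mock_config(hidden_size=4, bi_hidden_size=4))
hidden_states = torch.rand(4, 4, 4, 4)  # matches get_inputs() above
pooled = pooler(hidden_states)          # == pooler.dense(hidden_states[:, 0]).relu()
assert pooled.shape == (4, 4, 4)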
HeatmapLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/j2/cj2fuoriecywpqfpirtvfrrmhvdesdfz5ycsvka6dsuyyjauwrf7.py # Topologically Sorted Source Nodes: [sub, pow_1, loss, mean], Original ATen: [aten.sub, aten.pow, aten.mul, aten.mean] # Source node to ATen node mapping: # loss => mul # mean => mean # pow_1 => pow_1 # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %arg2_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [3]), kwargs = {}) triton_poi_fused_mean_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_mean_mul_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp3 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 * tmp8 tmp11 = tmp9 * tmp10 tmp12 = tmp5 + tmp11 tmp15 = tmp13 - tmp14 tmp16 = tmp15 * tmp15 tmp18 = tmp16 * tmp17 tmp19 = tmp12 + tmp18 tmp22 = tmp20 - tmp21 tmp23 = tmp22 * tmp22 tmp25 = tmp23 * tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/52/c523pn66wra4rfjkrgcnorjvvivoimaibk37xqhluh2ynu4fjzq7.py # Topologically Sorted Source Nodes: [mean_1, loss_1], Original ATen: [aten.mean] # Source node to ATen node mapping: # loss_1 => mean_2 # mean_1 => mean_1 # Graph fragment: # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [2]), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_1, [1]), kwargs = {}) triton_poi_fused_mean_1 = async_compile.triton('triton_poi_fused_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + (x0), tmp36, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, pow_1, loss, mean], Original ATen: [aten.sub, aten.pow, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_mul_pow_sub_0.run(arg0_1, arg1_1, arg2_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 del arg2_1 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mean_1, loss_1], Original ATen: [aten.mean] triton_poi_fused_mean_1.run(buf0, buf1, 4, grid=grid(4), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data import torch.nn.parallel import torch.optim import torch.utils.data.distributed import torch.multiprocessing class HeatmapLoss(nn.Module): def __init__(self): super().__init__() def forward(self, pred, gt, mask): assert pred.size() == gt.size() loss = (pred - gt) ** 2 * mask loss = loss.mean(dim=3).mean(dim=2).mean(dim=1) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data import torch.nn.parallel import torch.optim import torch.utils.data.distributed import torch.multiprocessing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp3 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 * tmp8 tmp11 = tmp9 * tmp10 tmp12 = tmp5 + tmp11 tmp15 = tmp13 - tmp14 tmp16 = tmp15 * tmp15 tmp18 = tmp16 * tmp17 tmp19 = tmp12 + tmp18 tmp22 = tmp20 - tmp21 tmp23 = tmp22 * tmp22 tmp25 = tmp23 * tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = 
tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + x0, tmp36, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_mul_pow_sub_0[grid(64)](arg0_1, arg1_1, arg2_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mean_1[grid(4)](buf0, buf1, 4, XBLOCK=4, num_warps =1, num_stages=1) del buf0 return buf1, class HeatmapLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
ducongju/Scale-sensitive-Heatmap
HeatmapLoss
false
1867
[ "MIT" ]
0
4016610ba96a6a6645895bbf4bcdb3ff4690a2d8
https://github.com/ducongju/Scale-sensitive-Heatmap/tree/4016610ba96a6a6645895bbf4bcdb3ff4690a2d8
import torch import torch.nn as nn import torch.utils.data import torch.nn.parallel import torch.optim import torch.utils.data.distributed import torch.multiprocessing class Model(nn.Module): def __init__(self): super().__init__() def forward(self, pred, gt, mask): assert pred.size() == gt.size() loss = (pred - gt) ** 2 * mask loss = loss.mean(dim=3).mean(dim=2).mean(dim=1) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return []
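The two kernels here split the reduction: triton_poi_fused_mean_mul_pow_sub_0 handles the dim-3 mean of (pred - gt)**2 * mask, and triton_poi_fused_mean_1 folds the dim-2 and dim-1 means together. For fixed-size inputs the chained means equal a single mean over dims (1, 2, 3), which makes a quick equivalence check easy (a sketch, assuming the HeatmapLoss definition above):

import torch

pred, gt, mask = (torch.rand(4, 4, 4, 4) for _ in range(3))
per_elem = (pred - gt) ** 2 * mask
chained = per_elem.mean(dim=3).mean(dim=2).mean(dim=1)  # what the module computes
assert torch.allclose(chained, per_elem.mean(dim=(1, 2, 3)))
assert torch.allclose(HeatmapLoss()(pred, gt, mask), chained)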
FocalL2Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/wk/cwkf64p2awlt4uhkbzwctplgk7lo7cwoikrsk4av6eqcge5xdaff.py # Topologically Sorted Source Nodes: [sub_4, pow_1, ge, sub, sub_1, sub_2, st, sub_3, factor, mul, loss, mean], Original ATen: [aten.sub, aten.pow, aten.ge, aten.rsub, aten.where, aten.abs, aten.mul, aten.mean] # Source node to ATen node mapping: # factor => abs_1 # ge => ge # loss => mul_1 # mean => mean # mul => mul # pow_1 => pow_1 # st => where # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # sub_3 => sub_3 # sub_4 => sub_4 # Graph fragment: # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_4, 2), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%arg1_1, 0.01), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0.1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, 0.02), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ge, %sub, %sub_2), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %where), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %abs_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg2_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_1, [3]), kwargs = {}) triton_poi_fused_abs_ge_mean_mul_pow_rsub_sub_where_0 = async_compile.triton('triton_poi_fused_abs_ge_mean_mul_pow_rsub_sub_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_ge_mean_mul_pow_rsub_sub_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_ge_mean_mul_pow_rsub_sub_where_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp48 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp49 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp60 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 0.01 tmp5 = tmp1 >= tmp4 tmp6 = 0.1 tmp7 = tmp0 - tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp0 tmp10 = 0.02 tmp11 = tmp9 - tmp10 tmp12 = tl.where(tmp5, tmp7, tmp11) tmp13 = tmp8 - tmp12 tmp14 = tl_math.abs(tmp13) tmp15 = tmp3 * tmp14 tmp17 = tmp15 * tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp19 >= tmp4 tmp23 = tmp18 - tmp6 tmp24 = tmp8 - tmp18 tmp25 = tmp24 - tmp10 tmp26 = tl.where(tmp22, tmp23, tmp25) tmp27 = tmp8 - tmp26 tmp28 = tl_math.abs(tmp27) tmp29 = tmp21 * tmp28 tmp31 = tmp29 * tmp30 tmp32 = tmp17 + tmp31 tmp35 = tmp33 - tmp34 tmp36 = tmp35 * tmp35 tmp37 = tmp34 >= tmp4 tmp38 = tmp33 - tmp6 tmp39 = tmp8 - tmp33 tmp40 = tmp39 - tmp10 tmp41 = tl.where(tmp37, tmp38, tmp40) tmp42 = tmp8 - tmp41 tmp43 = tl_math.abs(tmp42) tmp44 = tmp36 * tmp43 tmp46 = tmp44 * tmp45 tmp47 = tmp32 + tmp46 tmp50 = tmp48 - tmp49 tmp51 = tmp50 * tmp50 tmp52 = tmp49 >= tmp4 tmp53 = tmp48 - tmp6 tmp54 = tmp8 - tmp48 tmp55 = tmp54 - tmp10 tmp56 = tl.where(tmp52, tmp53, tmp55) tmp57 = tmp8 - tmp56 tmp58 = tl_math.abs(tmp57) tmp59 = tmp51 * tmp58 tmp61 = tmp59 * tmp60 tmp62 = tmp47 + tmp61 tmp63 = 4.0 tmp64 = tmp62 / tmp63 tl.store(out_ptr0 + (x0), 
tmp64, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/52/c523pn66wra4rfjkrgcnorjvvivoimaibk37xqhluh2ynu4fjzq7.py # Topologically Sorted Source Nodes: [mean_1, loss_1], Original ATen: [aten.mean] # Source node to ATen node mapping: # loss_1 => mean_2 # mean_1 => mean_1 # Graph fragment: # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [2]), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_1, [1]), kwargs = {}) triton_poi_fused_mean_1 = async_compile.triton('triton_poi_fused_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 
tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + (x0), tmp36, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_4, pow_1, ge, sub, sub_1, sub_2, st, sub_3, factor, mul, loss, mean], Original ATen: [aten.sub, aten.pow, aten.ge, aten.rsub, aten.where, aten.abs, aten.mul, aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_abs_ge_mean_mul_pow_rsub_sub_where_0.run(arg0_1, arg1_1, arg2_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 del arg2_1 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mean_1, loss_1], Original ATen: [aten.mean] triton_poi_fused_mean_1.run(buf0, buf1, 4, grid=grid(4), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data import torch.nn.parallel import torch.optim import torch.utils.data.distributed import torch.multiprocessing class FocalL2Loss(nn.Module): """ Compute focal l2 loss between predict and groundtruth :param thre: the threshold to distinguish between the foreground heatmap pixels and the background heatmap pixels :param alpha beta: compensation factors to reduce the punishment of easy samples (both easy foreground pixels and easy background pixels) """ def __init__(self, thre=0.01, alpha=0.1, beta=0.02): super().__init__() self.thre = thre self.alpha = alpha self.beta = beta def forward(self, pred, gt, mask): assert pred.size() == gt.size() st = torch.where(torch.ge(gt, self.thre), pred - self.alpha, 1 - pred - self.beta) factor = torch.abs(1.0 - st) loss = (pred - gt) ** 2 * factor * mask loss = loss.mean(dim=3).mean(dim=2).mean(dim=1) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data import torch.nn.parallel import torch.optim import torch.utils.data.distributed import torch.multiprocessing assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_ge_mean_mul_pow_rsub_sub_where_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp30 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp33 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp34 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp45 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp48 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp49 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp60 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 0.01 tmp5 = tmp1 >= tmp4 tmp6 = 0.1 tmp7 = tmp0 - tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp0 tmp10 = 0.02 tmp11 = tmp9 - tmp10 tmp12 = tl.where(tmp5, tmp7, tmp11) tmp13 = tmp8 - tmp12 tmp14 = tl_math.abs(tmp13) tmp15 = tmp3 * tmp14 tmp17 = tmp15 * tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp19 >= tmp4 tmp23 = tmp18 - tmp6 tmp24 = tmp8 - tmp18 tmp25 = tmp24 - tmp10 tmp26 = tl.where(tmp22, tmp23, tmp25) tmp27 = tmp8 - tmp26 tmp28 = tl_math.abs(tmp27) tmp29 = tmp21 * tmp28 tmp31 = tmp29 * tmp30 tmp32 = tmp17 + tmp31 tmp35 = tmp33 - tmp34 tmp36 = tmp35 * tmp35 tmp37 = tmp34 >= tmp4 tmp38 = tmp33 - tmp6 tmp39 = tmp8 - tmp33 tmp40 = tmp39 - tmp10 tmp41 = tl.where(tmp37, tmp38, tmp40) tmp42 = tmp8 - tmp41 tmp43 = tl_math.abs(tmp42) tmp44 = tmp36 * tmp43 tmp46 = tmp44 * tmp45 tmp47 = tmp32 + tmp46 tmp50 = tmp48 - tmp49 tmp51 = tmp50 * tmp50 tmp52 = tmp49 >= tmp4 tmp53 = tmp48 - tmp6 tmp54 = tmp8 - tmp48 tmp55 = tmp54 - tmp10 tmp56 = tl.where(tmp52, tmp53, tmp55) tmp57 = tmp8 - tmp56 tmp58 = tl_math.abs(tmp57) tmp59 = tmp51 * tmp58 tmp61 = tmp59 * tmp60 tmp62 = tmp47 + tmp61 tmp63 = 4.0 tmp64 = tmp62 / tmp63 tl.store(out_ptr0 + x0, tmp64, xmask) @triton.jit def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp10 = tl.load(in_ptr0 + (5 + 16 * 
x0), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp11 = tmp9 + tmp10 tmp13 = tmp11 + tmp12 tmp15 = tmp13 + tmp14 tmp16 = tmp15 / tmp7 tmp17 = tmp8 + tmp16 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp24 = tmp22 + tmp23 tmp25 = tmp24 / tmp7 tmp26 = tmp17 + tmp25 tmp29 = tmp27 + tmp28 tmp31 = tmp29 + tmp30 tmp33 = tmp31 + tmp32 tmp34 = tmp33 / tmp7 tmp35 = tmp26 + tmp34 tmp36 = tmp35 / tmp7 tl.store(out_ptr0 + x0, tmp36, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_ge_mean_mul_pow_rsub_sub_where_0[grid(64)](arg0_1, arg1_1, arg2_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 del arg2_1 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mean_1[grid(4)](buf0, buf1, 4, XBLOCK=4, num_warps =1, num_stages=1) del buf0 return buf1, class FocalL2LossNew(nn.Module): """ Compute focal l2 loss between predict and groundtruth :param thre: the threshold to distinguish between the foreground heatmap pixels and the background heatmap pixels :param alpha beta: compensation factors to reduce the punishment of easy samples (both easy foreground pixels and easy background pixels) """ def __init__(self, thre=0.01, alpha=0.1, beta=0.02): super().__init__() self.thre = thre self.alpha = alpha self.beta = beta def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
ducongju/Scale-sensitive-Heatmap
FocalL2Loss
false
1,868
[ "MIT" ]
0
4016610ba96a6a6645895bbf4bcdb3ff4690a2d8
https://github.com/ducongju/Scale-sensitive-Heatmap/tree/4016610ba96a6a6645895bbf4bcdb3ff4690a2d8
import torch import torch.nn as nn import torch.utils.data import torch.nn.parallel import torch.optim import torch.utils.data.distributed import torch.multiprocessing class Model(nn.Module): """ Compute focal l2 loss between predict and groundtruth :param thre: the threshold to distinguish between the foreground heatmap pixels and the background heatmap pixels :param alpha beta: compensation factors to reduce the punishment of easy samples (both easy foreground pixels and easy background pixels) """ def __init__(self, thre=0.01, alpha=0.1, beta=0.02): super().__init__() self.thre = thre self.alpha = alpha self.beta = beta def forward(self, pred, gt, mask): assert pred.size() == gt.size() st = torch.where(torch.ge(gt, self.thre), pred - self.alpha, 1 - pred - self.beta) factor = torch.abs(1.0 - st) loss = (pred - gt) ** 2 * factor * mask loss = loss.mean(dim=3).mean(dim=2).mean(dim=1) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return []
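For reference, a minimal eager-mode sketch (ours, not part of the repo) of the computation the two kernels above fuse: the first kernel evaluates st, factor, the masked squared error, and the mean over dim 3 in one pass; the second folds the remaining means over dims 2 and 1. The helper name focal_l2_reference is an assumption for illustration.

import torch

def focal_l2_reference(pred, gt, mask, thre=0.01, alpha=0.1, beta=0.02):
    # Foreground pixels (gt >= thre) use pred - alpha; background uses 1 - pred - beta.
    st = torch.where(gt >= thre, pred - alpha, 1 - pred - beta)
    factor = torch.abs(1.0 - st)
    # Masked squared error scaled by the focal factor, then averaged over dims 3, 2, 1.
    loss = (pred - gt) ** 2 * factor * mask
    return loss.mean(dim=3).mean(dim=2).mean(dim=1)

pred, gt, mask = (torch.rand(4, 4, 4, 4) for _ in range(3))
print(focal_l2_reference(pred, gt, mask).shape)  # torch.Size([4]): one value per batch item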
PermEqui2_mean
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/2c/c2caasuan6xkydnq2bvliamlyid6cl5fcz5kcz2mnyns45wtxqbs.py # Topologically Sorted Source Nodes: [xm], Original ATen: [aten.mean] # Source node to ATen node mapping: # xm => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [0], True), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 
/ tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/pn/cpnnzl6m333iefaraazao24mhav6pkxsbbvvmquaumoxtijub6i2.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sub] # Source node to ATen node mapping: # x_1 => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %view_1), kwargs = {}) triton_poi_fused_sub_1 = async_compile.triton('triton_poi_fused_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x4 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [xm], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [xm_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), 
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) del primals_3 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sub] triton_poi_fused_sub_1.run(buf3, primals_4, buf1, 256, grid=grid(256), stream=stream0) del buf1 del primals_4 return (buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class PermEqui2_mean(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() self.Gamma = nn.Linear(in_dim, out_dim) self.Lambda = nn.Linear(in_dim, out_dim, bias=False) self.weight = self.Gamma.weight self.bias = self.Gamma.bias def forward(self, x): xm = x.mean(0, keepdim=True) xm = self.Lambda(xm) x = self.Gamma(x) x = x - xm return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x4 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) del primals_3 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_sub_1[grid(256)](buf3, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_4 return buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0) class PermEqui2_meanNew(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() self.Gamma = nn.Linear(in_dim, out_dim) self.Lambda = nn.Linear(in_dim, out_dim, bias=False) self.weight = self.Gamma.weight self.bias = self.Gamma.bias def forward(self, input_0): primals_2 = self.weight primals_4 = self.bias primals_3 = self.Lambda.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
dvirsamuel/CrowdDet
PermEqui2_mean
false
1,869
[ "MIT" ]
0
db729bf71c0ca72229e5d446019769e095fdaa79
https://github.com/dvirsamuel/CrowdDet/tree/db729bf71c0ca72229e5d446019769e095fdaa79
import torch from torch import nn class Model(nn.Module): def __init__(self, in_dim, out_dim): super().__init__() self.Gamma = nn.Linear(in_dim, out_dim) self.Lambda = nn.Linear(in_dim, out_dim, bias=False) self.weight = self.Gamma.weight self.bias = self.Gamma.bias def forward(self, x): xm = x.mean(0, keepdim=True) xm = self.Lambda(xm) x = self.Gamma(x) x = x - xm return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
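A small sanity check (our sketch, not from the repo) of the property the layer is named for: the mean over dim 0 is permutation invariant and Gamma acts pointwise along dim 0, so Gamma(x) - Lambda(mean(x)) is equivariant to permutations of dim 0. The class is restated so the check is self-contained; the name PermEqui2MeanRef is ours.

import torch
from torch import nn

class PermEqui2MeanRef(nn.Module):
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.Gamma = nn.Linear(in_dim, out_dim)
        self.Lambda = nn.Linear(in_dim, out_dim, bias=False)

    def forward(self, x):
        return self.Gamma(x) - self.Lambda(x.mean(0, keepdim=True))

layer = PermEqui2MeanRef(4, 4)
x = torch.rand(4, 4, 4, 4)
perm = torch.randperm(4)
# Permuting the set dimension before the layer matches permuting its output.
print(torch.allclose(layer(x[perm]), layer(x)[perm], atol=1e-6))  # True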
CNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/2k/c2kzyqhyqyyqxgdp5r6nzaspxrwbwb4qohpyta25dmum4idd6ste.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 7744 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 121) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ok/cok24qqkdtk523r6eczgwefb5jwnbf2l727mh6c4zyyluklyos2t.py # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x3), tmp4, xmask) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/cf/ccfgishslsf55ewxlp6yqat4yfexhiuuv6gzyxxokqtaczlnr2lh.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_2 # Graph fragment: # 
%add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (16, 1, 8, 8), (64, 64, 8, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 1, 48, 48), (2304, 2304, 48, 1)) assert_size_stride(primals_4, (32, 16, 4, 4), (256, 16, 4, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (256, 512), (512, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (4, 256), (256, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 11, 11), (1936, 121, 11, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 7744, grid=grid(7744), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = 
extern_kernels.convolution(buf1, primals_4, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 2, 2), (128, 4, 2, 1)) buf3 = buf2; del buf2 # reuse buf7 = empty_strided_cuda((4, 32, 2, 2), (128, 4, 2, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_1.run(buf3, primals_5, buf7, 512, grid=grid(512), stream=stream0) del primals_5 buf4 = empty_strided_cuda((1, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (1, 512), (0, 1), 0), reinterpret_tensor(primals_6, (512, 256), (1, 512), 0), out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf6) del primals_9 return (buf6, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3, (1, 512), (512, 1), 0), buf5, primals_8, primals_6, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 1, 8, 8), (64, 64, 8, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 48, 48), (2304, 2304, 48, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.nn.functional as F class CNN(nn.Module): """Convolutional Neural Network""" def __init__(self): super(CNN, self).__init__() self.conv1 = nn.Conv2d(1, 16, 8, 4) self.conv2 = nn.Conv2d(16, 32, 4, 4) self.fc1 = nn.Linear(32 * 4 * 4, 256) self.fc2 = nn.Linear(256, 4) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = x.view(-1, 32 * 4 * 4) x = F.relu(self.fc1(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 1, 48, 48])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 7744 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 121 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (16, 1, 8, 8), (64, 64, 8, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 1, 48, 48), (2304, 2304, 48, 1)) assert_size_stride(primals_4, (32, 16, 4, 4), (256, 16, 4, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (256, 512), (512, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (4, 256), (256, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 11, 11), (1936, 121, 11, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(7744)](buf1, primals_2, 7744, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 2, 2), (128, 4, 2, 1)) buf3 = buf2 del buf2 buf7 = empty_strided_cuda((4, 32, 2, 2), (128, 4, 2, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(512)](buf3, primals_5, buf7, 512, XBLOCK=128, num_warps=4, num_stages=1) del 
primals_5 buf4 = empty_strided_cuda((1, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (1, 512), (0, 1), 0), reinterpret_tensor(primals_6, (512, 256), (1, 512), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(256)](buf5, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf6) del primals_9 return buf6, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3 , (1, 512), (512, 1), 0), buf5, primals_8, primals_6, buf7 class CNNNew(nn.Module): """Convolutional Neural Network""" def __init__(self): super(CNNNew, self).__init__() self.conv1 = nn.Conv2d(1, 16, 8, 4) self.conv2 = nn.Conv2d(16, 32, 4, 4) self.fc1 = nn.Linear(32 * 4 * 4, 256) self.fc2 = nn.Linear(256, 4) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
dziganto/DQN
CNN
false
1,870
[ "MIT" ]
0
033de76a2295ddc5d9775cfd2612a9d79634547e
https://github.com/dziganto/DQN/tree/033de76a2295ddc5d9775cfd2612a9d79634547e
import torch import torch.nn as nn import torch.nn.parallel import torch.nn.functional as F class Model(nn.Module): """Convolutional Neural Network""" def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 16, 8, 4) self.conv2 = nn.Conv2d(16, 32, 4, 4) self.fc1 = nn.Linear(32 * 4 * 4, 256) self.fc2 = nn.Linear(256, 4) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = x.view(-1, 32 * 4 * 4) x = F.relu(self.fc1(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 1, 48, 48])] def get_init_inputs(): return []
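The traced shapes asserted in the compiled graph above follow directly from the convolution arithmetic; a short sketch (assuming the standard floor((n - k) / s) + 1 output-size formula with no padding) reproduces them and shows why the mm in the graph runs on a (1, 512) view.

import torch
import torch.nn as nn
import torch.nn.functional as F

conv1 = nn.Conv2d(1, 16, 8, 4)
conv2 = nn.Conv2d(16, 32, 4, 4)
x = torch.rand(4, 1, 48, 48)
x = F.relu(conv1(x))  # (4, 16, 11, 11): (48 - 8) // 4 + 1 = 11
x = F.relu(conv2(x))  # (4, 32, 2, 2):  (11 - 4) // 4 + 1 = 2
# view(-1, 32 * 4 * 4) asks for rows of 512 values, but one sample holds only
# 32 * 2 * 2 = 128, so all four samples collapse into a single row; this is
# why the compiled graph materializes the flattened activation as (1, 512).
print(x.reshape(-1, 32 * 4 * 4).shape)  # torch.Size([1, 512])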
Wav2Vec2ClassificationHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_2 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class Wav2Vec2ClassificationHead(nn.Module): """Head for wav2vec classification task.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.final_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, final_dropout=0.5, num_labels=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4 class Wav2Vec2ClassificationHeadNew(nn.Module): """Head for wav2vec classification task.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.final_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Ayushk4/MedImaging
Wav2Vec2ClassificationHead
false
1871
[ "MIT" ]
0
dbc8968f076385be0c8db42210817ae0940fa26a
https://github.com/Ayushk4/MedImaging/tree/dbc8968f076385be0c8db42210817ae0940fa26a
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class Model(nn.Module): """Head for wav2vec classification task.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.final_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, final_dropout=0.5, num_labels=4)}]
TransposeMultiheadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ao/caoovxtqrx42gvkmjirowqmmbh6kppvfh5ebrzzv4kzkgwm2umii.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/yi/cyis4mboyae5mxe2ro5oeo66oz5rc4akmej7nfmsxeoa7ahzysuw.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone_1 # Graph fragment: # %clone_1 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%squeeze,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/vd/cvd6wmqfst4sb2irm2z4kur6zavvgueasfxdd7gdj23besq2ssgi.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 0.5), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = 
tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4c/c4c6p4vuakt6bwm5jq3zgmnhlhi3dsci2l7ltqlyd3droewryzhj.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax, aten.mean] # Source node to ATen node mapping: # multi_head_attention_forward => div, mean, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_8, [1]), kwargs = {}) triton_poi_fused__softmax_mean_4 = async_compile.triton('triton_poi_fused__softmax_mean_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mean_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mean_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = 1.0 tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr1 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() 
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12, ), (1, )) assert_size_stride(primals_3, (12, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(buf1, primals_2, buf2, 192, grid=grid(192), stream=stream0) del buf1 del primals_2 buf3 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 16), 64), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = buf4; del buf4 # reuse buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax, aten.mean] triton_poi_fused__softmax_mean_4.run(buf5, buf6, buf10, 64, grid=grid(64), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 4), (4, 16, 1), 128), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_5 return (reinterpret_tensor(buf9, (4, 4, 4), (4, 16, 1), 0), buf10, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 16), 128), reinterpret_tensor(buf3, (4, 4, 4), (4, 1, 16), 0), reinterpret_tensor(buf2, (4, 4, 4), (4, 16, 1), 64), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = 
rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
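The constant 0.5 baked into triton_poi_fused_mul_2 above is the standard scaled-dot-product factor, not an arbitrary choice: nn.MultiheadAttention scales queries by 1/sqrt(head_dim) before Q @ K^T, and here embed_dim=4 with num_heads=1 gives head_dim=4. A one-line check:

import math

embed_dim, num_heads = 4, 1
head_dim = embed_dim // num_heads
assert 1.0 / math.sqrt(head_dim) == 0.5   # the multiplier in triton_poi_fused_mul_2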
import torch import torch.nn as nn import torch.utils.data from typing import Optional import torch.nn class TransposeMultiheadAttention(nn.Module): """ Wrapper for nn.MultiheadAttention which first transposes the input tensor from (batch_size, seq_len, feature_dim) to (seq_length, batch_size, feature_dim), then applies the attention and transposes the attention outputs back to the input shape. """ def __init__(self, feature_dim: 'int', num_heads: 'int'=1): """ Args: feature_dim (int): attention embedding dimension num_heads (int): number of attention heads """ super().__init__() self._attention = nn.MultiheadAttention(embed_dim=feature_dim, num_heads=num_heads) self._attention_weights = None @property def attention_weights(self) ->Optional[torch.Tensor]: """ Contains attention weights from last forward call. """ return self._attention_weights def forward(self, x: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None ) ->torch.Tensor: """ Args: x (torch.Tensor): tensor of shape (batch_size, seq_len, feature_dim) mask (torch.Tensor): bool tensor with shape (batch_size, seq_len). Sequence elements that are False are invalid. Returns: Tensor with shape (batch_size, seq_len, feature_dim) """ assert x.dim( ) == 3, 'Requires x shape (batch_size x seq_len x feature_dim)' if mask is not None: mask[:, 0] = True mask = ~mask x = x.transpose(0, 1) attn_output, self._attention_weights = self._attention(x, x, x, key_padding_mask=mask) attn_output = attn_output.transpose(0, 1) return attn_output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'feature_dim': 4}]
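A usage sketch for the mask handling above (assumes the class definition is in scope): mask marks valid positions with True, and the forward pins position 0 valid, then inverts it into the key_padding_mask convention that nn.MultiheadAttention expects (True = ignore). Note that the in-place mask[:, 0] = True mutates the caller's tensor.

import torch

attn = TransposeMultiheadAttention(feature_dim=4)
x = torch.rand(4, 4, 4)                                # (batch_size, seq_len, feature_dim)
mask = torch.tensor([[True, True, False, False]] * 4)  # last two positions are padding
out = attn(x, mask=mask)
assert out.shape == (4, 4, 4)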
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data from typing import Optional import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_mean_4(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = 1.0 tmp10 = tmp8 / tmp9 tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12,), (1,)) 
assert_size_stride(primals_3, (12, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(192)](buf1, primals_2, buf2, 192, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_2 buf3 = empty_strided_cuda((4, 4, 4), (4, 16, 1), torch.float32) triton_poi_fused_mul_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 16), 64), out=buf4) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf4 del buf4 buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_mean_4[grid(64)](buf5, buf6, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = buf5 del buf5 extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 4), (4, 16, 1), 128), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_0[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_5 return reinterpret_tensor(buf9, (4, 4, 4), (4, 16, 1), 0 ), buf10, reinterpret_tensor(buf0, (16, 4), (4, 1), 0 ), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), primals_4, reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 16), 128 ), reinterpret_tensor(buf3, (4, 4, 4), (4, 1, 16), 0 ), reinterpret_tensor(buf2, (4, 4, 4), (4, 16, 1), 64) class TransposeMultiheadAttentionNew(nn.Module): """ Wrapper for nn.MultiheadAttention which first transposes the input tensor from (batch_size, seq_len, feature_dim) to (seq_length, batch_size, feature_dim), then applies the attention and transposes the attention outputs back to the input shape. """ def __init__(self, feature_dim: 'int', num_heads: 'int'=1): """ Args: feature_dim (int): attention embedding dimension num_heads (int): number of attention heads """ super().__init__() self._attention = nn.MultiheadAttention(embed_dim=feature_dim, num_heads=num_heads) self._attention_weights = None @property def attention_weights(self) ->Optional[torch.Tensor]: """ Contains attention weights from last forward call. """ return self._attention_weights def forward(self, input_0): primals_3 = self._attention.in_proj_weight primals_2 = self._attention.in_proj_bias primals_4 = self._attention.out_proj.weight primals_5 = self._attention.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
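The two softmax kernels above implement the usual numerically stable split: triton_poi_fused__softmax_3 subtracts the row max before exponentiating, and triton_poi_fused__softmax_mean_4 divides by the row sum (its extra divide by 1.0 is the head average with num_heads=1). An eager sketch of the same decomposition:

import torch

def stable_softmax(scores):
    # Step 1 (kernel _softmax_3): shift by the row max so exp() cannot overflow.
    shifted = scores - scores.amax(dim=-1, keepdim=True)
    e = shifted.exp()
    # Step 2 (kernel _softmax_mean_4): normalize by the row sum.
    return e / e.sum(dim=-1, keepdim=True)

scores = torch.randn(4, 4, 4)
torch.testing.assert_close(stable_softmax(scores), torch.softmax(scores, dim=-1))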
denred0/pytorchvideo
TransposeMultiheadAttention
false
1872
[ "Apache-2.0" ]
0
d874bfc9969895d2afcedea2e12bae5e1bcfb809
https://github.com/denred0/pytorchvideo/tree/d874bfc9969895d2afcedea2e12bae5e1bcfb809
import torch import torch.nn as nn import torch.utils.data from typing import Optional import torch.nn class Model(nn.Module): """ Wrapper for nn.MultiheadAttention which first transposes the input tensor from (batch_size, seq_len, feature_dim) to (seq_length, batch_size, feature_dim), then applies the attention and transposes the attention outputs back to the input shape. """ def __init__(self, feature_dim: 'int', num_heads: 'int'=1): """ Args: feature_dim (int): attention embedding dimension num_heads (int): number of attention heads """ super().__init__() self._attention = nn.MultiheadAttention(embed_dim=feature_dim, num_heads=num_heads) self._attention_weights = None @property def attention_weights(self) ->Optional[torch.Tensor]: """ Contains attention weights from last forward call. """ return self._attention_weights def forward(self, x: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None ) ->torch.Tensor: """ Args: x (torch.Tensor): tensor of shape (batch_size, seq_len, feature_dim) mask (torch.Tensor): bool tensor with shape (batch_size, seq_len). Sequence elements that are False are invalid. Returns: Tensor with shape (batch_size, seq_len, feature_dim) """ assert x.dim( ) == 3, 'Requires x shape (batch_size x seq_len x feature_dim)' if mask is not None: mask[:, 0] = True mask = ~mask x = x.transpose(0, 1) attn_output, self._attention_weights = self._attention(x, x, x, key_padding_mask=mask) attn_output = attn_output.transpose(0, 1) return attn_output def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4]
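The attention_weights property above caches the second output of nn.MultiheadAttention, which is averaged over heads by default; that matches the divide-by-num_heads (here 1.0) in triton_poi_fused__softmax_mean_4. A retrieval sketch (assumes the Model definition above is in scope):

import torch

model = Model(4)                        # feature_dim = 4
x = torch.rand(4, 4, 4)                 # (batch_size, seq_len, feature_dim)
_ = model(x)
w = model.attention_weights             # (batch_size, seq_len, seq_len), filled by forward
assert w.shape == (4, 4, 4)
assert torch.allclose(w.sum(dim=-1), torch.ones(4, 4))   # each row is a softmax distribution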
SynthWide256
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/oy/coywg33tjoxmlqbwwhyvzwfsjs5n7ceqi5tzkvjci3qwulab2iv2.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8388608 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 65536) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/bo/cbobvss6c6iqh2cuinv6ecehmjbf3ihhrfb5gfsiif5xta2gtdfo.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (512*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (512*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (256 + (2*x0) + (512*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (257 + (2*x0) + (512*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), 
tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/fa/cfajt7vh3ahxp6emvz7olqbsg43uzopvztq57g3omsehusmvtego.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4194304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16384) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/jb/cjblo3k3wgsjkcpv6rqstgy24fpa7gatyfvafzpfh2azprp3t5is.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = (xindex // 64) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (256*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (256*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (128 + (2*x0) + (256*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (129 + (2*x0) + (256*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/3j/c3j4jkvokwjzyp5aekzyppt7m5e3z6uaeuav42db4ccfjozaasyw.py # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # relu_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties 
@triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/cl/cclry2h5k76pty6kqbs5iwxj4rd2ojgszpjgdj234z5l2tmywvxz.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_2 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/b4/cb4rafbxurylblcz5jurin6ysseyfihblhiourxkkk3rbybhbhj4.py # Topologically Sorted Source Nodes: [conv2d_3, relu_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_3 => convolution_3 # relu_3 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/az/cazafesp4cnkhuqwee57d5uvmoijfpl4z4uq772ps4t2jxpto2tt.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_3 => getitem_6, getitem_7 # Graph fragment: # %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = 
tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/mc/cmcaufjiuodhizgvps6nh6hglhzidcdegze2d3lhq6j7ew3etpvf.py # Topologically Sorted Source Nodes: [conv2d_4, relu_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # relu_4 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/sf/csfoayapm3pqgrwbr6tsnvmpq7tz7urardhxq4x7vmqmo62x7zqz.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_4 => getitem_8, getitem_9 # Graph fragment: # %getitem_8 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 0), kwargs = {}) # %getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_9 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_9', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/oq/coqobqtj55mct5p5rd7wsny6k3kptdo5wralrfj5ikqajjviotdp.py # Topologically Sorted Source Nodes: [conv2d_5, relu_5], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_5 => convolution_5 # relu_5 => relu_5 # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_8, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {}) triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 1024 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4g/c4gdhobmlqpduhitdil2o2czatqppl5il2xfdltufptmnlalyvei.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_5 => getitem_10, getitem_11 # Graph fragment: # %getitem_10 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_5, 0), kwargs = {}) # %getitem_11 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_5, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_11 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/wm/cwmqszhhc4jwiawtggxeaozlbl5b5mgjjuz7t4d2r7fjgwqwvc7q.py # Topologically Sorted Source Nodes: [conv2d_6, relu_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_6 => convolution_6 # relu_6 => relu_6 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_10, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {}) triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/rv/crv6ehtpneqa33yi3fmov2fahwophwzbykocx6jkbfbsrk6uvzq4.py # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_6 => _low_memory_max_pool2d_with_offsets_6, getitem_13 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_6 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_6, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_13 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_6, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_13 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), None, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), None, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = 
triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (256, ), (1, )) assert_size_stride(primals_10, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (512, ), (1, )) assert_size_stride(primals_12, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_13, (1024, ), (1, )) assert_size_stride(primals_14, (256, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (10, 4096), (4096, 1)) assert_size_stride(primals_17, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 256, 256), (2097152, 65536, 256, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 8388608, grid=grid(8388608), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 128, 128), (1048576, 16384, 128, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 4194304, grid=grid(4194304), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1048576, grid=grid(1048576), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = 
extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 2097152, grid=grid(2097152), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.int8) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 524288, grid=grid(524288), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 32, 32), (262144, 1024, 32, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [conv2d_3, relu_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf13, primals_9, 1048576, grid=grid(1048576), stream=stream0) del primals_9 buf14 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) buf15 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.int8) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_7.run(buf13, buf14, buf15, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 512, 16, 16), (131072, 256, 16, 1)) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [conv2d_4, relu_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf17, primals_11, 524288, grid=grid(524288), stream=stream0) del primals_11 buf18 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32) buf19 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.int8) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_9.run(buf17, buf18, buf19, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 1024, 8, 8), (65536, 64, 8, 1)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [conv2d_5, relu_5], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_10.run(buf21, primals_13, 262144, grid=grid(262144), stream=stream0) del primals_13 buf22 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1), torch.float32) buf23 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1), torch.int8) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_11.run(buf21, buf22, buf23, 65536, 
grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 256, 4, 4), (4096, 16, 4, 1)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [conv2d_6, relu_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_12.run(buf25, primals_15, 16384, grid=grid(16384), stream=stream0) del primals_15 buf26 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.int8) buf27 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_13.run(buf25, buf26, buf27, 4096, grid=grid(4096), stream=stream0) buf28 = empty_strided_cuda((1, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.addmm] extern_kernels.addmm(primals_17, reinterpret_tensor(buf27, (1, 4096), (0, 1), 0), reinterpret_tensor(primals_16, (4096, 10), (1, 4096), 0), alpha=1, beta=1, out=buf28) del primals_17 return (buf28, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, buf15, buf17, buf18, buf19, buf21, buf22, buf23, buf25, buf26, reinterpret_tensor(buf27, (1, 4096), (4096, 1), 0), primals_16, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 256, 256), (196608, 65536, 256, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((1024, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((10, 4096), (4096, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark 
import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
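Note: each triton_poi_fused_convolution_relu_* kernel above fuses only the per-channel bias add and the ReLU into one elementwise pass; the convolution itself still runs through extern_kernels.convolution. A minimal eager-mode sketch of what the first fused epilogue computes (shapes taken from buf0; the tensor names here are illustrative, not from the generated code):

import torch

# (4, 32, 256, 256) conv output and a 32-element bias broadcast per channel
# (the kernel's x1 = xindex // 65536 % 32 selects the channel index).
conv_out = torch.randn(4, 32, 256, 256, device='cuda')
bias = torch.randn(32, device='cuda')

# Kernel body: tmp2 = tmp0 + tmp1; tmp4 = maximum(0, tmp2), stored in place.
fused = torch.relu(conv_out + bias.view(1, -1, 1, 1))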
import torch import torch.nn as nn import torch.nn.functional as F class SynthWide256(nn.Module): def __init__(self, num_c=10, f=1): super(SynthWide256, self).__init__() self.pool = nn.MaxPool2d(2, 2) self.conv1 = nn.Conv2d(3, 32 * f, 3, padding=1) self.conv2 = nn.Conv2d(32 * f, 64 * f, 3, padding=1) self.conv3 = nn.Conv2d(64 * f, 128 * f, 3, padding=1) self.conv4 = nn.Conv2d(128 * f, 256 * f, 3, padding=1) self.conv5 = nn.Conv2d(256 * f, 512 * f, 3, padding=1) self.conv6 = nn.Conv2d(512 * f, 1024 * f, 3, padding=1) self.conv7 = nn.Conv2d(1024 * f, 256, 3, padding=1) self.fc1 = nn.Linear(256 * 4 * 4, num_c) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = self.pool(F.relu(self.conv3(x))) x = self.pool(F.relu(self.conv4(x))) x = self.pool(F.relu(self.conv5(x))) x = self.pool(F.relu(self.conv6(x))) x = self.pool(F.relu(self.conv7(x))) x = x.view(-1, 256 * 4 * 4) x = self.fc1(x) return x def get_inputs(): return [torch.rand([4, 3, 256, 256])] def get_init_inputs(): return [[], {}]
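A detail worth flagging in the module above: seven stride-2 max-pools shrink the 256x256 input to 2x2, not 4x4, so x.view(-1, 256 * 4 * 4) silently folds the four batch samples into a single row of 4096 features. The compiled graph makes this visible when the (4, 256, 2, 2) buffer buf27 is reinterpreted as (1, 4096) before the addmm. A quick check of the arithmetic:

side = 256
for _ in range(7):   # seven 2x2, stride-2 pools
    side //= 2
print(side)          # 2 -> 256 * 2 * 2 = 1024 features per sample,
                     # so view(-1, 4096) needs all 4 samples to fill one row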
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 65536 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 512 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 512 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (256 + 2 * x0 + 512 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (257 + 2 * x0 + 512 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16384 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 256 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 256 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (128 + 2 * x0 + 256 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (129 + 2 * x0 + 256 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = 
tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 
+ x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), None, eviction_policy= 'evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, None) tl.store(out_ptr1 + x2, tmp16, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 256, 256), (196608, 65536, 256, 1)) assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_13, (1024,), (1,)) assert_size_stride(primals_14, (256, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (10, 4096), (4096, 1)) assert_size_stride(primals_17, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 256, 256), (2097152, 65536, 256, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(8388608)](buf1, primals_2, 8388608, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1 ), torch.float32) buf3 = empty_strided_cuda((4, 32, 128, 128), (524288, 16384, 128, 1 ), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(2097152)](buf1, buf2, buf3, 2097152, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 64, 128, 128), (1048576, 16384, 128, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(4194304)](buf5, primals_5, 4194304, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) buf7 = empty_strided_cuda((4, 64, 64, 
64), (262144, 4096, 64, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(1048576)](buf5, buf6, buf7, 1048576, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 64, 64), (524288, 4096, 64, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(2097152)](buf9, primals_7, 2097152, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(524288)](buf9, buf10, buf11, 524288, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 32, 32), (262144, 1024, 32, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_6[grid(1048576)](buf13, primals_9, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf14 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32) buf15 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_7[grid(262144)](buf13, buf14, buf15, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf16 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 512, 16, 16), (131072, 256, 16, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_8[grid(524288)](buf17, primals_11, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf18 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .float32) buf19 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_9[grid(131072)](buf17, buf18, buf19, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf18, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 1024, 8, 8), (65536, 64, 8, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_10[grid(262144)](buf21, primals_13, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf22 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1), torch.float32) buf23 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_11[grid(65536)](buf21, buf22, buf23, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf24 = extern_kernels.convolution(buf22, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 256, 4, 4), (4096, 16, 4, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_12[grid(16384)](buf25, primals_15, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf26 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.int8) buf27 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch. 
float32) triton_poi_fused_max_pool2d_with_indices_13[grid(4096)](buf25, buf26, buf27, 4096, XBLOCK=256, num_warps=4, num_stages=1) buf28 = empty_strided_cuda((1, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_17, reinterpret_tensor(buf27, (1, 4096 ), (0, 1), 0), reinterpret_tensor(primals_16, (4096, 10), (1, 4096), 0), alpha=1, beta=1, out=buf28) del primals_17 return (buf28, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, buf15, buf17, buf18, buf19, buf21, buf22, buf23, buf25, buf26, reinterpret_tensor(buf27, (1, 4096), (4096, 1), 0), primals_16) class SynthWide256New(nn.Module): def __init__(self, num_c=10, f=1): super(SynthWide256New, self).__init__() self.pool = nn.MaxPool2d(2, 2) self.conv1 = nn.Conv2d(3, 32 * f, 3, padding=1) self.conv2 = nn.Conv2d(32 * f, 64 * f, 3, padding=1) self.conv3 = nn.Conv2d(64 * f, 128 * f, 3, padding=1) self.conv4 = nn.Conv2d(128 * f, 256 * f, 3, padding=1) self.conv5 = nn.Conv2d(256 * f, 512 * f, 3, padding=1) self.conv6 = nn.Conv2d(512 * f, 1024 * f, 3, padding=1) self.conv7 = nn.Conv2d(1024 * f, 256, 3, padding=1) self.fc1 = nn.Linear(256 * 4 * 4, num_c) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.conv5.weight primals_11 = self.conv5.bias primals_12 = self.conv6.weight primals_13 = self.conv6.bias primals_14 = self.conv7.weight primals_15 = self.conv7.bias primals_16 = self.fc1.weight primals_17 = self.fc1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
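A minimal sketch (assuming a CUDA device and the two classes defined above) of cross-checking the Inductor wrapper against the eager module; the tolerances are illustrative:

import torch

eager = SynthWide256().cuda().eval()
compiled = SynthWide256New().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # parameter names match 1:1

x = torch.rand(4, 3, 256, 256, device='cuda')
with torch.no_grad():
    ref = eager(x)       # shape (1, 10), see the view(-1, 4096) note above
    out = compiled(x)
torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)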
repo_name: dengliming/iotnets
module_name: SynthWide256
synthetic: false
uuid: 1873
licenses: ["MIT"]
stars: 0
sha: db744e56769c799dbf765a27fc5aa91e3edeaaa3
repo_link: https://github.com/dengliming/iotnets/tree/db744e56769c799dbf765a27fc5aa91e3edeaaa3
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self, num_c=10, f=1): super().__init__() self.pool = nn.MaxPool2d(2, 2) self.conv1 = nn.Conv2d(3, 32 * f, 3, padding=1) self.conv2 = nn.Conv2d(32 * f, 64 * f, 3, padding=1) self.conv3 = nn.Conv2d(64 * f, 128 * f, 3, padding=1) self.conv4 = nn.Conv2d(128 * f, 256 * f, 3, padding=1) self.conv5 = nn.Conv2d(256 * f, 512 * f, 3, padding=1) self.conv6 = nn.Conv2d(512 * f, 1024 * f, 3, padding=1) self.conv7 = nn.Conv2d(1024 * f, 256, 3, padding=1) self.fc1 = nn.Linear(256 * 4 * 4, num_c) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = self.pool(F.relu(self.conv3(x))) x = self.pool(F.relu(self.conv4(x))) x = self.pool(F.relu(self.conv5(x))) x = self.pool(F.relu(self.conv6(x))) x = self.pool(F.relu(self.conv7(x))) x = x.view(-1, 256 * 4 * 4) x = self.fc1(x) return x def get_inputs(): return [torch.rand([4, 3, 256, 256])] def get_init_inputs(): return []
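For context, code like the original_triton_code field above is what TorchInductor emits when compiling such a module; a minimal, version-dependent sketch of regenerating it (standard PyTorch 2.x API, but the exact output differs across versions):

import torch

model = Model().cuda()
x = get_inputs()[0].cuda()

# With TORCH_COMPILE_DEBUG=1 set in the environment, Inductor dumps the
# generated wrapper and Triton kernels to a debug directory on disk.
compiled = torch.compile(model)
out = compiled(x)
print(out.shape)  # torch.Size([1, 10])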
CNNCifar
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/zv/czvfpj3ah2lefbwpcuw4esv23bxs5a3ab63ply3ntgbsdktepd5v.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 784) % 6 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/v7/cv7qi7gg3bpfwb3hj7zgy5jlgh7x7wdgqsfsodkjsoverxdjlf6z.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = (xindex // 14) x2 = (xindex // 1176) x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, xmask) 
tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/xe/cxelxvpw3asckozc53rh36773aohp5hqpbp2nos5ymcdqhxvo4bl.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 100) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tn/ctnw4tbgfy47ppke77vu7rtiz7dl5o3ahickx4p64n7c5rmrrix6.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/jn/cjnqv3sgcv5x2iz7ij5zdad6ofabcnonrlksgsxu2ob7n274gz6b.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_2 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/6m/c6m6u2ctjb4r4ra3sizrwezzkzegfp2ombflmfg3dwjfci2pen7h.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_4 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (6, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120, ), (1, )) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84, ), (1, )) assert_size_stride(primals_10, (4, 84), (84, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 18816, grid=grid(18816), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 4704, grid=grid(4704), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 6400, grid=grid(6400), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 480, grid=grid(480), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] 
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] triton_poi_fused_relu_5.run(buf11, primals_9, 336, grid=grid(336), stream=stream0) del primals_9 buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (84, 4), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((120, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 84), (84, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
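An aside, not part of the generated module: a self-contained sketch of the fused per-channel bias-add + ReLU pattern that triton_poi_fused_convolution_relu_0 applies in place to the convolution output. The kernel name, block size, and shapes here are illustrative only.

import torch
import triton
import triton.language as tl

@triton.jit
def fused_bias_relu(x_ptr, bias_ptr, n_elements, hw: tl.constexpr,
                    channels: tl.constexpr, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n_elements
    c = offs // hw % channels          # recover the channel index, like x1 above
    x = tl.load(x_ptr + offs, mask=mask)
    b = tl.load(bias_ptr + c, mask=mask, eviction_policy='evict_last')
    y = tl.maximum(x + b, 0.0)         # bias add fused with ReLU
    tl.store(x_ptr + offs, y, mask=mask)

x = torch.randn(4, 6, 28, 28, device='cuda')     # conv1 output shape above
bias = torch.randn(6, device='cuda')
ref = torch.relu(x + bias.view(1, 6, 1, 1))
n = x.numel()
fused_bias_relu[(triton.cdiv(n, 256),)](x, bias, n, hw=28 * 28, channels=6, BLOCK=256)
torch.testing.assert_close(x, ref)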
from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional as F
from torch import nn


class CNNCifar(nn.Module):

    def __init__(self, args):
        super(CNNCifar, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, args.num_classes)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 32, 32])]


def get_init_inputs():
    return [[], {'args': _mock_config(num_classes=4)}]
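A small illustrative check (not from the source) of why fc1 expects 16 * 5 * 5 = 400 features for the 32x32 inputs returned by get_inputs():

side = 32
side = side - 5 + 1   # conv1, valid 5x5: 32 -> 28
side = side // 2      # 2x2 max pool:     28 -> 14
side = side - 5 + 1   # conv2, valid 5x5: 14 -> 10
side = side // 2      # 2x2 max pool:     10 -> 5
assert 16 * side * side == 400  # matches nn.Linear(16 * 5 * 5, 120)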
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


# conv1 bias add + ReLU, applied in place to the conv output (784 = 28 * 28)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 18816
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 784 % 6
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


# 2x2/stride-2 max pool over the 28x28 maps; out_ptr1 keeps the argmax
# index (int8 in 0..3) needed for the backward pass
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 4704
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 14
    x3 = xindex // 14
    x2 = xindex // 1176
    x4 = xindex % 1176
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
    tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)


# conv2 bias add + ReLU (100 = 10 * 10)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 100 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


# 2x2/stride-2 max pool over the 10x10 maps, again with argmax indices
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 1600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x1 = xindex // 5
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
        eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1], 1, tl.int8)
    tmp4 = tl.full([1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + x2, tmp15, xmask)
    tl.store(out_ptr1 + x2, tmp16, xmask)


# fc1 bias add + ReLU
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 480
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 120
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


# fc2 bias add + ReLU
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 336
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 84
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
    assert_size_stride(primals_2, (6,), (1,))
    assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
    assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
    assert_size_stride(primals_5, (16,), (1,))
    assert_size_stride(primals_6, (120, 400), (400, 1))
    assert_size_stride(primals_7, (120,), (1,))
    assert_size_stride(primals_8, (84, 120), (120, 1))
    assert_size_stride(primals_9, (84,), (1,))
    assert_size_stride(primals_10, (4, 84), (84, 1))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # convolutions run through cuDNN via extern_kernels; only the
        # bias adds, ReLUs, and pools are fused into Triton kernels
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
            18816, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1),
            torch.float32)
        buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
            buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
            6400, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1),
            torch.float32)
        triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
            buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
        # buf7 viewed as (4, 400) is the flattened feature map fed to fc1
        buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
            reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
        buf9 = buf8
        del buf8
        triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_7
        buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
        extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (
            1, 120), 0), out=buf10)
        buf11 = buf10
        del buf10
        triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_9
        # fc3 with bias folded into a single addmm
        buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
            primals_10, (84, 4), (1, 84), 0), alpha=1, beta=1, out=buf12)
        del primals_11
    return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
        buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11,
        primals_10, primals_8, primals_6)


class CNNCifarNew(nn.Module):

    def __init__(self, args):
        super(CNNCifarNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, args.num_classes)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.fc1.weight
        primals_7 = self.fc1.bias
        primals_8 = self.fc2.weight
        primals_9 = self.fc2.bias
        primals_10 = self.fc3.weight
        primals_11 = self.fc3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
FANJIYU0825/federated-learning
CNNCifar
false
1874
[ "MIT" ]
0
5772ca0a321a222eae5d5e29b70fb4a468c28374
https://github.com/FANJIYU0825/federated-learning/tree/5772ca0a321a222eae5d5e29b70fb4a468c28374
from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional as F
from torch import nn


class Model(nn.Module):

    def __init__(self, args):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, args.num_classes)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 32, 32])]


def get_init_inputs():
    return [[], {'args': _mock_config(num_classes=4)}]
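A minimal sketch of how these harness fields fit together, assuming (as in the companion record above) that _mock_config yields an object with a num_classes attribute:

init_args, init_kwargs = get_init_inputs()
model = Model(*init_args, **init_kwargs)
out = model(*get_inputs())
assert out.shape == (4, 4)  # batch of 4, num_classes = 4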
TransformerEncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/6s/c6sstbvcita246hkfqwdeatnmsh3e6vlcncrzcwlsoqg7dmxvabp.py # Topologically Sorted Source Nodes: [src2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # src2 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/zv/czv3tzezwxkylzsgkrivaldxprnr7tvjr5iihe4mbc7bzdev5lsj.py # Topologically Sorted Source Nodes: [src2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # src2 => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/nd/cnduix6bb25yait76qubu4kscpuuqgdjho4akakbftxdxfjk22sq.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul_2 # Graph fragment: # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), 
kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/dw/cdwqsjnh2osfmjr2utzzaqdg2vrfivzkuhareq3urgidllj2bsvr.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, 
cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/y5/cy5gjrtl7netbzcjhig66pdorub2vbq2qvwmv3tamld2ehimmlz7.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, 
ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ji/cjikooh3unjvssdwbmc5bbgrf7argvwkpdjikzfpajfrzpotlkhf.py # Topologically Sorted Source Nodes: [src, src2_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # src => add_2 # src2_1 => var_mean_1 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %squeeze), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [1]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 
tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/j4/cj4vucbv6vxdldbfg73k3ixw2brnd6f754oxugjq3s7syrcrb4qe.py # Topologically Sorted Source Nodes: [src, src2_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # src => add_2 # src2_1 => add_3, add_4, mul_3, mul_4, rsqrt_1, sub_2 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %squeeze), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_5, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_6), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_6), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_7), kwargs = {}) triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ct/cct7fdnwiat77gmy2crh3kczskgz2h3fhqyydq4w5mawjn5eb6qf.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_8 = async_compile.triton('triton_poi_fused_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/44/c444sh6bryz652bk24ocru63kbqhe67iwwzctt3isl7imfgv5iaa.py # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add] # Source node to ATen node mapping: # src => add_2 # src_1 => add_5 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %squeeze), kwargs = {}) # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 
%add_tensor), kwargs = {}) triton_poi_fused_add_9 = async_compile.triton('triton_poi_fused_add_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_out_ptr0 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (2048, 4), (4, 1)) assert_size_stride(primals_9, (2048, ), (1, )) assert_size_stride(primals_10, (4, 2048), (2048, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [src2], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 4, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [src2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 16, grid=grid(16), stream=stream0) del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically 
Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), out=buf5) buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf6, 16, grid=grid(16), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul, aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf8, buf9, 64, grid=grid(64), stream=stream0) del buf8 buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf10, buf11, 4, 4, grid=grid(4, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (4, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1; del buf1 # reuse buf14 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [src, src2_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(primals_3, buf12, buf13, buf14, 4, grid=grid(4), stream=stream0) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [src, src2_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_7.run(primals_3, buf12, buf13, buf14, primals_6, primals_7, buf15, 16, grid=grid(16), stream=stream0) del buf13 del buf14 del primals_7 buf16 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf15, reinterpret_tensor(primals_8, (4, 2048), (1, 4), 0), out=buf16) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] triton_poi_fused_relu_8.run(buf17, primals_9, 8192, grid=grid(8192), stream=stream0) del primals_9 buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf17, 
reinterpret_tensor(primals_10, (2048, 4), (1, 2048), 0), out=buf18) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add] triton_poi_fused_add_9.run(buf19, primals_3, buf12, primals_11, 16, grid=grid(16), stream=stream0) del primals_11 return (buf19, primals_3, primals_6, buf2, buf9, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_10, primals_8, primals_5, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == 'relu':
        return F.relu
    if activation == 'gelu':
        return F.gelu
    if activation == 'glu':
        return F.glu
    raise RuntimeError(f'activation should be relu/gelu/glu, not {activation}.')


class TransformerEncoderLayer(nn.Module):

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
            no_norm=False, activation='relu'):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead,
            dropout=dropout, bias=False)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model) if not no_norm else nn.Identity()
        self.norm2 = nn.LayerNorm(d_model) if not no_norm else nn.Identity()
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        nn.init.kaiming_uniform_(self.self_attn.in_proj_weight, a=math.sqrt(5))

    def with_pos_embed(self, tensor, pos):
        return tensor if pos is None else tensor + pos

    def forward(self, src, pos=None):
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.self_attn(q, k, src2)
        src = src + self.dropout1(src2[0])
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = src + self.dropout2(src2)
        return src


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'nhead': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn import torch.nn.functional as F import torch.utils.model_zoo assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, 
xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = 
xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (2048, 4), (4, 1)) assert_size_stride(primals_9, (2048,), (1,)) assert_size_stride(primals_10, (4, 2048), (2048, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](primals_3, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 16), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 32), out=buf5) buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0) del buf3 triton_poi_fused_mul_2[grid(16)](buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf8 buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (4, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1 del buf1 buf14 = buf0 del buf0 
        triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_3, buf12,
            buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_3, buf12,
            buf13, buf14, primals_6, primals_7, buf15, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf13
        del buf14
        del primals_7
        buf16 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
        extern_kernels.mm(buf15, reinterpret_tensor(primals_8, (4, 2048),
            (1, 4), 0), out=buf16)
        buf17 = buf16
        del buf16
        triton_poi_fused_relu_8[grid(8192)](buf17, primals_9, 8192,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
        buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf17, reinterpret_tensor(primals_10, (2048, 4),
            (1, 2048), 0), out=buf18)
        buf19 = buf18
        del buf18
        triton_poi_fused_add_9[grid(16)](buf19, primals_3, buf12,
            primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_11
    return (buf19, primals_3, primals_6, buf2, buf9, reinterpret_tensor(
        buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_10,
        primals_8, primals_5, reinterpret_tensor(buf5, (4, 1, 4),
        (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0),
        reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0),
        reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
        reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
        reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == 'relu':
        return F.relu
    if activation == 'gelu':
        return F.gelu
    if activation == 'glu':
        return F.glu
    raise RuntimeError(f'activation should be relu/gelu/glu, not {activation}.')


class TransformerEncoderLayerNew(nn.Module):

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
            no_norm=False, activation='relu'):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead,
            dropout=dropout, bias=False)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model) if not no_norm else nn.Identity()
        self.norm2 = nn.LayerNorm(d_model) if not no_norm else nn.Identity()
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        nn.init.kaiming_uniform_(self.self_attn.in_proj_weight, a=math.sqrt(5))

    def with_pos_embed(self, tensor, pos):
        return tensor if pos is None else tensor + pos

    def forward(self, input_0):
        # Map module parameters to the positional `primals_*` slots in the
        # order the traced graph first uses them: norm1 scale/shift, the
        # input, the attention projections, norm2 scale/shift, then the
        # feed-forward linears. This matches how `call` consumes each slot
        # (e.g. primals_3 is the residual input, primals_5 the out_proj
        # weight, primals_11 the linear2 bias added in the final fused add).
        primals_1 = self.norm1.weight
        primals_2 = self.norm1.bias
        primals_3 = input_0
        primals_4 = self.self_attn.in_proj_weight
        primals_5 = self.self_attn.out_proj.weight
        primals_6 = self.norm2.weight
        primals_7 = self.norm2.bias
        primals_8 = self.linear1.weight
        primals_9 = self.linear1.bias
        primals_10 = self.linear2.weight
        primals_11 = self.linear2.bias
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
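# --- Illustrative aside (not part of the generated module above) ---
# A minimal, self-contained sketch of the two-pass numerically stable
# softmax that triton_poi_fused__softmax_3/_4 implement for rows of length
# 4: pass 1 subtracts the row max and exponentiates, pass 2 divides by the
# row sum. The kernel names `softmax_pass1`/`softmax_pass2` are invented
# for this sketch; only the decomposition mirrors the fused kernels.
import torch
import triton
import triton.language as tl


@triton.jit
def softmax_pass1(in_ptr, out_ptr, xnumel, XBLOCK: tl.constexpr):
    xindex = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)
    xmask = xindex < xnumel
    row = xindex // 4
    x = tl.load(in_ptr + xindex, xmask)
    # Reload the four row elements and reduce a max per output element,
    # as the fused kernel does instead of a block-level reduction.
    m01 = tl.maximum(tl.load(in_ptr + 4 * row, xmask),
                     tl.load(in_ptr + (1 + 4 * row), xmask))
    m23 = tl.maximum(tl.load(in_ptr + (2 + 4 * row), xmask),
                     tl.load(in_ptr + (3 + 4 * row), xmask))
    tl.store(out_ptr + xindex, tl.exp(x - tl.maximum(m01, m23)), xmask)


@triton.jit
def softmax_pass2(in_ptr, out_ptr, xnumel, XBLOCK: tl.constexpr):
    xindex = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)
    xmask = xindex < xnumel
    row = xindex // 4
    x = tl.load(in_ptr + xindex, xmask)
    s = (tl.load(in_ptr + 4 * row, xmask) +
         tl.load(in_ptr + (1 + 4 * row), xmask) +
         tl.load(in_ptr + (2 + 4 * row), xmask) +
         tl.load(in_ptr + (3 + 4 * row), xmask))
    tl.store(out_ptr + xindex, x / s, xmask)


if __name__ == '__main__':
    x = torch.randn(16, 4, device='cuda')
    tmp, out = torch.empty_like(x), torch.empty_like(x)
    softmax_pass1[(1,)](x, tmp, x.numel(), XBLOCK=64)
    softmax_pass2[(1,)](tmp, out, x.numel(), XBLOCK=64)
    torch.testing.assert_close(out, torch.softmax(x, dim=-1),
        rtol=1e-5, atol=1e-6)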
dongyan007/Pretrained-IPT-main-master
TransformerEncoderLayer
false
1,875
[ "Apache-2.0" ]
0
7ed47002373e11bd57b7904f6935acdfba1e44ff
https://github.com/dongyan007/Pretrained-IPT-main-master/tree/7ed47002373e11bd57b7904f6935acdfba1e44ff
import math
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == 'relu':
        return F.relu
    if activation == 'gelu':
        return F.gelu
    if activation == 'glu':
        return F.glu
    raise RuntimeError(f'activation should be relu/gelu/glu, not {activation}.')


class Model(nn.Module):

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
            no_norm=False, activation='relu'):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead,
            dropout=dropout, bias=False)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model) if not no_norm else nn.Identity()
        self.norm2 = nn.LayerNorm(d_model) if not no_norm else nn.Identity()
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        nn.init.kaiming_uniform_(self.self_attn.in_proj_weight, a=math.sqrt(5))

    def with_pos_embed(self, tensor, pos):
        return tensor if pos is None else tensor + pos

    def forward(self, src, pos=None):
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.self_attn(q, k, src2)
        src = src + self.dropout1(src2[0])
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = src + self.dropout2(src2)
        return src


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [4, 4]
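# --- Illustrative aside (not part of the dataset record) ---
# A hedged parity check, assuming the eager TransformerEncoderLayer and the
# compiled TransformerEncoderLayerNew from this record are importable in one
# module. eval() matters: the traced graph above contains no dropout ops, so
# the two paths only agree with dropout disabled.
import torch

eager = TransformerEncoderLayer(d_model=4, nhead=4).cuda().eval()
compiled = TransformerEncoderLayerNew(d_model=4, nhead=4).cuda().eval()
compiled.load_state_dict(eager.state_dict())
src = torch.rand(4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(compiled(src), eager(src), rtol=1e-4, atol=1e-5)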
BertOutput
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ss/cssn3ayzwsxbizosd6ieezxafjef3fxscx57lbnlxbdiuph3p2je.py # Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean] # Source node to ATen node mapping: # add => add # u => mean # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {}) triton_poi_fused_add_mean_0 = async_compile.triton('triton_poi_fused_add_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, 
eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1)) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2)) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3)) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/l6/cl6vibrzoyykzmbhmvlsdaksh3k2diif7eg66z2ho46tjsy6emma.py # Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub] # Source node to ATen node mapping: # add => add # sub => sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {}) # %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {}) triton_poi_fused_add_sub_1 = async_compile.triton('triton_poi_fused_add_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4p/c4pnuv3rymhg72qutbvx7mkzv6t7edcefa73bt3nl66b4qtouu4a.py # Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add_1 => add_1 # hidden_states_2 => add_2 # mul => mul # pow_1 => pow_1 # s => mean_1 # sqrt => sqrt # x => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %div), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {}) triton_poi_fused_add_div_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, 
eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_add_mean_0.run(buf0, primals_2, primals_4, buf1, 64, grid=grid(64), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub] triton_poi_fused_add_sub_1.run(buf2, primals_2, primals_4, buf1, 256, grid=grid(256), stream=stream0) del buf1 del primals_2 del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mean_mul_pow_sqrt_2.run(primals_5, buf2, primals_6, buf3, 256, grid=grid(256), stream=stream0) del primals_6 return (buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.onnx


class BertLayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside
        the square root).
        """
        super(BertLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias


class BertOutput(nn.Module):

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size,
            eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4,
        layer_norm_eps=1, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.onnx assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + 1) tmp8 = tl.broadcast_to(tmp7, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + 2) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + 3) tmp22 = tl.broadcast_to(tmp21, [XBLOCK]) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tmp9 = tmp6 + tmp8 tmp11 = tmp9 + tmp10 tmp12 = tmp5 + tmp11 tmp16 = tmp13 + tmp15 tmp18 = tmp16 + tmp17 tmp19 = tmp12 + tmp18 tmp23 = tmp20 + tmp22 tmp25 = tmp23 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp3 + tmp5 tmp8 = tmp7 * tmp7 tmp9 = tmp6 + tmp8 tmp11 = tmp10 * tmp10 tmp12 = tmp9 + tmp11 tmp13 = 4.0 tmp14 = tmp12 / tmp13 tmp15 = 1.0 tmp16 = tmp14 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp1 / tmp17 
tmp19 = tmp0 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x2, tmp21, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_2 del primals_4 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5, buf2, primals_6, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf2 class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertOutputNew(nn.Module): def __init__(self, config): super(BertOutputNew, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_0, input_1): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_5 = self.LayerNorm.weight primals_6 = self.LayerNorm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Alwaysproblem/examples-1
BertOutput
false
1,876
[ "MIT" ]
0
9754fa63ed1931489a21ac1f5b299f945e369a5c
https://github.com/Alwaysproblem/examples-1/tree/9754fa63ed1931489a21ac1f5b299f945e369a5c
from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.onnx


class BertLayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside
        the square root).
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias


class Model(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size,
            eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4,
        layer_norm_eps=1, hidden_dropout_prob=0.5)}]
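# --- Illustrative aside (not part of the dataset record) ---
# Hand-rolled check of the arithmetic the three fused kernels in this record
# perform, assuming BertLayerNorm from the record is in scope. In eval mode
# (the p=0.5 dropout is traced away) the graph computes:
#   x = hidden @ W_dense.T + b_dense + input_tensor
#   u = mean(x, -1); s = mean((x - u)^2, -1)
#   y = gamma * (x - u) / sqrt(s + eps) + beta, with eps = 1 inside the sqrt.
import torch

torch.manual_seed(0)
hidden = torch.rand(4, 4, 4, 4)
residual = torch.rand(4, 4, 4, 4)
w, b = torch.rand(4, 4), torch.rand(4)
x = hidden @ w.t() + b + residual   # the adds fused into add_mean_0 / add_sub_1
u = x.mean(-1, keepdim=True)
d = x - u
s = d.pow(2).mean(-1, keepdim=True)
y = d / torch.sqrt(s + 1.0)         # epsilon inside the square root (TF style)
ln = BertLayerNorm(4, eps=1.0)
torch.testing.assert_close(ln.weight * y + ln.bias, ln(x))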
L1DistanceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/du/cduvmb5hpbjgwnwykzo2hhalu225cbhq36a4xnrmox4sitjisfkt.py # Topologically Sorted Source Nodes: [pow_1, ne_1, sum_1, total_sents, gt], Original ATen: [aten.pow, aten.ne, aten.sum, aten._to_copy, aten.gt] # Source node to ATen node mapping: # gt => gt # ne_1 => ne_1 # pow_1 => pow_1 # sum_1 => sum_1 # total_sents => convert_element_type_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg2_1, 2), kwargs = {}) # %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg2_1, 0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne_1,), kwargs = {}) # %convert_element_type_1 : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_1, torch.float32), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convert_element_type_1, 0), kwargs = {}) triton_per_fused__to_copy_gt_ne_pow_sum_0 = async_compile.triton('triton_per_fused__to_copy_gt_ne_pow_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_gt_ne_pow_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_gt_ne_pow_sum_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tmp0 * tmp0 tmp2 = 0.0 tmp3 = tmp0 != tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tmp7.to(tl.float32) tmp9 = tmp8 > tmp2 tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp1, None) tl.store(out_ptr2 + (tl.full([1], 0, tl.int32)), tmp8, None) tl.store(out_ptr3 + (tl.full([1], 0, tl.int32)), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/vo/cvoi446gy7yflmv6g5aq7ogsszwzcewz6p6hjw7csauq7kcquqp3.py # Topologically Sorted Source Nodes: [ne, labels_1s, labels_masked, predictions_masked], Original ATen: [aten.ne, aten._to_copy, aten.mul] # Source node to ATen node mapping: # labels_1s => convert_element_type # labels_masked => mul_1 # ne => ne # predictions_masked => mul # Graph fragment: # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg0_1, -1), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.float32), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %convert_element_type), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %convert_element_type), kwargs = {}) triton_poi_fused__to_copy_mul_ne_1 = async_compile.triton('triton_poi_fused__to_copy_mul_ne_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_mul_ne_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_mul_ne_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp5 = tl.load(in_ptr1 + (x0), xmask) tmp1 = -1.0 tmp2 = tmp0 != tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tmp0 * tmp3 tmp6 = tmp5 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr1 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((), (), torch.float32) buf5 = empty_strided_cuda((), (), torch.bool) # Topologically Sorted Source Nodes: [pow_1, ne_1, sum_1, total_sents, gt], Original ATen: [aten.pow, aten.ne, aten.sum, aten._to_copy, aten.gt] stream0 = get_raw_stream(0) triton_per_fused__to_copy_gt_ne_pow_sum_0.run(arg2_1, buf0, buf4, buf5, 1, 256, grid=grid(1), stream=stream0) del arg2_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [ne, labels_1s, labels_masked, predictions_masked], Original ATen: [aten.ne, aten._to_copy, aten.mul] triton_poi_fused__to_copy_mul_ne_1.run(arg0_1, arg1_1, buf1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, buf4, buf1, buf2, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn


class L1DistanceLoss(nn.Module):
    """Custom L1 loss for distance matrices."""

    def __init__(self, args):
        super(L1DistanceLoss, self).__init__()
        self.args = args
        self.word_pair_dims = 1, 2

    def forward(self, predictions, label_batch, length_batch):
        """Computes L1 loss on distance matrices.

        Ignores all entries where label_batch=-1
        Normalizes first within sentences (by dividing by the square of the
        sentence length) and then across the batch.

        Args:
            predictions: A pytorch batch of predicted distances
            label_batch: A pytorch batch of true distances
            length_batch: A pytorch batch of sentence lengths

        Returns:
            A tuple of:
                batch_loss: average loss in the batch
                total_sents: number of sentences in the batch
        """
        labels_1s = (label_batch != -1).float()
        predictions_masked = predictions * labels_1s
        labels_masked = label_batch * labels_1s
        total_sents = torch.sum(length_batch != 0).float()
        squared_lengths = length_batch.pow(2).float()
        if total_sents > 0:
            loss_per_sent = torch.sum(torch.abs(predictions_masked -
                labels_masked), dim=self.word_pair_dims)
            normalized_loss_per_sent = loss_per_sent / squared_lengths
            batch_loss = torch.sum(normalized_loss_per_sent) / total_sents
        else:
            batch_loss = torch.tensor(0.0, device=self.args['device'])
        return batch_loss, total_sents


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'args': _mock_config()}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_gt_ne_pow_sum_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tmp0 * tmp0 tmp2 = 0.0 tmp3 = tmp0 != tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tmp7.to(tl.float32) tmp9 = tmp8 > tmp2 tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp1, None) tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp8, None) tl.store(out_ptr3 + tl.full([1], 0, tl.int32), tmp9, None) @triton.jit def triton_poi_fused__to_copy_mul_ne_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp5 = tl.load(in_ptr1 + x0, xmask) tmp1 = -1.0 tmp2 = tmp0 != tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tmp0 * tmp3 tmp6 = tmp5 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp6, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((), (), torch.float32) buf5 = empty_strided_cuda((), (), torch.bool) get_raw_stream(0) triton_per_fused__to_copy_gt_ne_pow_sum_0[grid(1)](arg2_1, buf0, buf4, buf5, 1, 256, num_warps=2, num_stages=1) del arg2_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__to_copy_mul_ne_1[grid(256)](arg0_1, arg1_1, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, buf4, buf1, buf2, buf5 class L1DistanceLossNew(nn.Module): """Custom L1 loss for distance matrices.""" def __init__(self, args): super(L1DistanceLossNew, self).__init__() self.args = args self.word_pair_dims = 1, 2 def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
AnReu/structural-probes
L1DistanceLoss
false
1,877
[ "Apache-2.0" ]
0
fdc99dc124fa6df3dbdd5ba48a90f08bb6bf37b7
https://github.com/AnReu/structural-probes/tree/fdc99dc124fa6df3dbdd5ba48a90f08bb6bf37b7
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn


class Model(nn.Module):
    """Custom L1 loss for distance matrices."""

    def __init__(self, args):
        super().__init__()
        self.args = args
        self.word_pair_dims = 1, 2

    def forward(self, predictions, label_batch, length_batch):
        """Computes L1 loss on distance matrices.

        Ignores all entries where label_batch=-1
        Normalizes first within sentences (by dividing by the square of the
        sentence length) and then across the batch.

        Args:
            predictions: A pytorch batch of predicted distances
            label_batch: A pytorch batch of true distances
            length_batch: A pytorch batch of sentence lengths

        Returns:
            A tuple of:
                batch_loss: average loss in the batch
                total_sents: number of sentences in the batch
        """
        labels_1s = (label_batch != -1).float()
        predictions_masked = predictions * labels_1s
        labels_masked = label_batch * labels_1s
        total_sents = torch.sum(length_batch != 0).float()
        squared_lengths = length_batch.pow(2).float()
        if total_sents > 0:
            loss_per_sent = torch.sum(torch.abs(predictions_masked -
                labels_masked), dim=self.word_pair_dims)
            normalized_loss_per_sent = loss_per_sent / squared_lengths
            batch_loss = torch.sum(normalized_loss_per_sent) / total_sents
        else:
            batch_loss = torch.tensor(0.0, device=self.args['device'])
        return batch_loss, total_sents


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return []
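# --- Illustrative aside (not part of the dataset record) ---
# A tiny worked example of the masking and normalization above, assuming the
# Model class from this record is in scope. Entries whose label is -1 drop
# out of the L1 sum; each sentence's loss is divided by length**2, and the
# batch loss averages over sentences with nonzero length.
from _paritybench_helpers import _mock_config
import torch

loss_fn = Model(_mock_config())
pred = torch.tensor([[[1.0, 2.0], [3.0, 4.0]],
                     [[0.0, 0.0], [0.0, 0.0]]])
gold = torch.tensor([[[1.0, 1.0], [-1.0, 4.0]],
                     [[1.0, -1.0], [-1.0, -1.0]]])
lens = torch.tensor([2.0, 1.0])
loss, n_sents = loss_fn(pred, gold, lens)
# sentence 0: unmasked |pred - gold| sums to 1 -> 1 / 2**2 = 0.25
# sentence 1: unmasked |pred - gold| sums to 1 -> 1 / 1**2 = 1.00
assert n_sents == 2 and torch.isclose(loss, torch.tensor(0.625))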
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # q_1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < 
xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/s2/cs2rk3o3kmhydx4oijp6rsdb5atcrq5axy4adadrpl7gkt7scies.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) 
tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/xt/cxtkkmujo4ytg6ycpz5lk5livtstr63pg5nsf5ijewjbtrfrqx6k.py # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # y_2 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_19,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ph/cph4gq6ubasxpb4prwcxu747d6tqcovxnv5auwqkl3hg6xj6lzbf.py # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.add] # Source node to ATen node mapping: # y_2 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_21, %primals_11), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (1, 4), (4, 1)) assert_size_stride(primals_11, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [k_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [v_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_8, buf8, 16, 4, grid=grid(16, 4), stream=stream0) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically 
Sorted Source Nodes: [y], Original ATen: [aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del buf9 buf11 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 1), (1, 4), 0), out=buf11) buf12 = reinterpret_tensor(buf11, (4, 4, 1), (4, 1, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.add] triton_poi_fused_add_4.run(buf12, primals_11, 16, grid=grid(16), stream=stream0) del primals_11 return (buf12, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): def forward(self, query, key, value, mask=None): dk = query.size()[-1] scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) attention = F.softmax(scores, dim=-1) return attention.matmul(value) class MultiHeadAttention(nn.Module): def __init__(self, in_features, head_num, bias=True, activation=None): super(MultiHeadAttention, self).__init__() if in_features % head_num != 0: raise ValueError( '`in_features`({}) should be divisible by `head_num`({})'. format(in_features, head_num)) self.in_features = in_features self.head_num = head_num self.activation = activation self.bias = bias self.linear_q = nn.Linear(in_features, in_features, bias) self.linear_k = nn.Linear(in_features, in_features, bias) self.linear_v = nn.Linear(in_features, in_features, bias) self.linear_o = nn.Linear(in_features, in_features // self.head_num, bias) def forward(self, q, k, v, mask=None): q, k, v = self.linear_q(q), self.linear_k(k), self.linear_v(v) if self.activation is not None: q = self.activation(q) k = self.activation(k) v = self.activation(v) q = self._reshape_to_batches(q) k = self._reshape_to_batches(k) v = self._reshape_to_batches(v) if mask is not None: mask = mask.repeat(self.head_num, 1, 1) y = ScaledDotProductAttention()(q, k, v, mask) y = self._reshape_from_batches(y) y = self.linear_o(y) if self.activation is not None: y = self.activation(y) return y @staticmethod def gen_history_mask(x): """Generate the mask that only uses history data. :param x: Input tensor. :return: The mask. """ batch_size, seq_len, _ = x.size() return torch.tril(torch.ones(seq_len, seq_len)).view(1, seq_len, seq_len).repeat(batch_size, 1, 1) def _reshape_to_batches(self, x): batch_size, seq_len, in_feature = x.size() sub_dim = in_feature // self.head_num return x.reshape(batch_size, seq_len, self.head_num, sub_dim).permute( 0, 2, 1, 3).reshape(batch_size * self.head_num, seq_len, sub_dim) def _reshape_from_batches(self, x): batch_size, seq_len, in_feature = x.size() batch_size //= self.head_num out_dim = in_feature * self.head_num return x.reshape(batch_size, self.head_num, seq_len, in_feature ).permute(0, 2, 1, 3).reshape(batch_size, seq_len, out_dim) def extra_repr(self): return 'in_features={}, head_num={}, bias={}, activation={}'.format( self.in_features, self.head_num, self.bias, self.activation) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'in_features': 4, 'head_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_10, (1, 4), (4, 1)) assert_size_stride(primals_11, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_8, buf8, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
triton_poi_fused_clone_3[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf9 buf11 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 1), (1, 4), 0), out=buf11) buf12 = reinterpret_tensor(buf11, (4, 4, 1), (4, 1, 1), 0) del buf11 triton_poi_fused_add_4[grid(16)](buf12, primals_11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_11 return buf12, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0 ), primals_10, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0) class ScaledDotProductAttention(nn.Module): def forward(self, query, key, value, mask=None): dk = query.size()[-1] scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) attention = F.softmax(scores, dim=-1) return attention.matmul(value) class MultiHeadAttentionNew(nn.Module): def __init__(self, in_features, head_num, bias=True, activation=None): super(MultiHeadAttentionNew, self).__init__() if in_features % head_num != 0: raise ValueError( '`in_features`({}) should be divisible by `head_num`({})'. format(in_features, head_num)) self.in_features = in_features self.head_num = head_num self.activation = activation self.bias = bias self.linear_q = nn.Linear(in_features, in_features, bias) self.linear_k = nn.Linear(in_features, in_features, bias) self.linear_v = nn.Linear(in_features, in_features, bias) self.linear_o = nn.Linear(in_features, in_features // self.head_num, bias) @staticmethod def gen_history_mask(x): """Generate the mask that only uses history data. :param x: Input tensor. :return: The mask. """ batch_size, seq_len, _ = x.size() return torch.tril(torch.ones(seq_len, seq_len)).view(1, seq_len, seq_len).repeat(batch_size, 1, 1) def _reshape_to_batches(self, x): batch_size, seq_len, in_feature = x.size() sub_dim = in_feature // self.head_num return x.reshape(batch_size, seq_len, self.head_num, sub_dim).permute( 0, 2, 1, 3).reshape(batch_size * self.head_num, seq_len, sub_dim) def _reshape_from_batches(self, x): batch_size, seq_len, in_feature = x.size() batch_size //= self.head_num out_dim = in_feature * self.head_num return x.reshape(batch_size, self.head_num, seq_len, in_feature ).permute(0, 2, 1, 3).reshape(batch_size, seq_len, out_dim) def extra_repr(self): return 'in_features={}, head_num={}, bias={}, activation={}'.format( self.in_features, self.head_num, self.bias, self.activation) def forward(self, input_0, input_1, input_2): primals_1 = self.linear_q.weight primals_2 = self.linear_q.bias primals_4 = self.linear_k.weight primals_5 = self.linear_k.bias primals_7 = self.linear_v.weight primals_8 = self.linear_v.bias primals_10 = self.linear_o.weight primals_11 = self.linear_o.bias primals_3 = input_0 primals_6 = input_1 primals_9 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
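A quick sanity check for a compiled record like this one is to run the Inductor class side by side with the eager module it was traced from. The following is only a sketch: it assumes a CUDA device, that both class definitions above are available in one namespace, and the in_features=4, head_num=4 configuration from get_init_inputs.

import torch

torch.manual_seed(0)
eager = MultiHeadAttention(in_features=4, head_num=4).cuda()
compiled = MultiHeadAttentionNew(in_features=4, head_num=4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical parameters in both modules

q, k, v = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
torch.testing.assert_close(compiled(q, k, v), eager(q, k, v))  # same (4, 4, 1) output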
dukeNashor/CaptainStony
MultiHeadAttention
false
1878
[ "MIT" ]
0
6320a27420e686666a4d7172437cf55fe42de2b6
https://github.com/dukeNashor/CaptainStony/tree/6320a27420e686666a4d7172437cf55fe42de2b6
import math import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): def forward(self, query, key, value, mask=None): dk = query.size()[-1] scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) attention = F.softmax(scores, dim=-1) return attention.matmul(value) class Model(nn.Module): def __init__(self, in_features, head_num, bias=True, activation=None): super().__init__() if in_features % head_num != 0: raise ValueError( '`in_features`({}) should be divisible by `head_num`({})'. format(in_features, head_num)) self.in_features = in_features self.head_num = head_num self.activation = activation self.bias = bias self.linear_q = nn.Linear(in_features, in_features, bias) self.linear_k = nn.Linear(in_features, in_features, bias) self.linear_v = nn.Linear(in_features, in_features, bias) self.linear_o = nn.Linear(in_features, in_features // self.head_num, bias) def forward(self, q, k, v, mask=None): q, k, v = self.linear_q(q), self.linear_k(k), self.linear_v(v) if self.activation is not None: q = self.activation(q) k = self.activation(k) v = self.activation(v) q = self._reshape_to_batches(q) k = self._reshape_to_batches(k) v = self._reshape_to_batches(v) if mask is not None: mask = mask.repeat(self.head_num, 1, 1) y = ScaledDotProductAttention()(q, k, v, mask) y = self._reshape_from_batches(y) y = self.linear_o(y) if self.activation is not None: y = self.activation(y) return y @staticmethod def gen_history_mask(x): """Generate the mask that only uses history data. :param x: Input tensor. :return: The mask. """ batch_size, seq_len, _ = x.size() return torch.tril(torch.ones(seq_len, seq_len)).view(1, seq_len, seq_len).repeat(batch_size, 1, 1) def _reshape_to_batches(self, x): batch_size, seq_len, in_feature = x.size() sub_dim = in_feature // self.head_num return x.reshape(batch_size, seq_len, self.head_num, sub_dim).permute( 0, 2, 1, 3).reshape(batch_size * self.head_num, seq_len, sub_dim) def _reshape_from_batches(self, x): batch_size, seq_len, in_feature = x.size() batch_size //= self.head_num out_dim = in_feature * self.head_num return x.reshape(batch_size, self.head_num, seq_len, in_feature ).permute(0, 2, 1, 3).reshape(batch_size, seq_len, out_dim) def extra_repr(self): return 'in_features={}, head_num={}, bias={}, activation={}'.format( self.in_features, self.head_num, self.bias, self.activation) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [4, 4]
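In the eager reference above, the gen_history_mask helper is the intended companion of the optional mask argument: it builds a lower-triangular matrix so position i can only attend to positions <= i. A small self-attention sketch with illustrative sizes (not part of the record itself):

import torch

model = Model(in_features=4, head_num=2)
x = torch.rand(2, 5, 4)                  # (batch, seq_len, in_features)
mask = Model.gen_history_mask(x)         # (2, 5, 5) lower-triangular ones
y = model(x, x, x, mask=mask)            # query, key, and value all set to x
print(y.shape)                           # torch.Size([2, 5, 2]): linear_o maps 4 -> 4 // 2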
CRF
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/pg/cpg5vjqssrlsnglazs6sllqz33jc7cdnlonls4f4kcehzuhy7ewb.py # Topologically Sorted Source Nodes: [feats_norm], Original ATen: [aten.linalg_vector_norm] # Source node to ATen node mapping: # feats_norm => pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2], True), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) triton_poi_fused_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/n5/cn5teeeignn4sp3kp3ujrwymtwcb7yaywvx3h7kpqyi4ixhma2ot.py # Topologically Sorted Source Nodes: [pairwise_sim], Original ATen: [aten.div] # Source node to ATen node mapping: # pairwise_sim => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm_1, %bmm), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_out_ptr0 + (x0), xmask) tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/zt/cztirch7jrz5e7flqpqcr5acbutu5sz47zvhuaocrfzdpb7umhxt.py # Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_1, sub, mul_2, sub_1, pairwise_potential_E], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] # Source node to ATen node mapping: # W_sym => div_1 # add => add # mul_1 => mul_1 # mul_2 => mul_2 # pairwise_potential => mul # pairwise_potential_E => sum_2 # sub => sub # sub_1 => sub_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %permute_2), kwargs = {}) # %div_1 : [num_users=1] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {}) # %mul : [num_users=20] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, %mul), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %permute_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %mul), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_1, [2], True), kwargs = {}) triton_poi_fused_add_div_mul_rsub_sub_sum_2 = async_compile.triton('triton_poi_fused_add_div_mul_rsub_sub_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_rsub_sub_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_rsub_sub_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp2 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp16 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp29 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp42 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp43 = 
tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp44 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp5 = tmp3 + tmp4 tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp8 = tmp2 * tmp7 tmp9 = tmp1 * tmp8 tmp10 = 1.0 tmp11 = tmp10 - tmp1 tmp12 = tmp11 * tmp8 tmp13 = tmp9 - tmp12 tmp15 = tl.sigmoid(tmp14) tmp19 = tmp17 + tmp18 tmp20 = tmp19 * tmp6 tmp21 = tmp16 * tmp20 tmp22 = tmp15 * tmp21 tmp23 = tmp10 - tmp15 tmp24 = tmp23 * tmp21 tmp25 = tmp22 - tmp24 tmp26 = tmp13 + tmp25 tmp28 = tl.sigmoid(tmp27) tmp32 = tmp30 + tmp31 tmp33 = tmp32 * tmp6 tmp34 = tmp29 * tmp33 tmp35 = tmp28 * tmp34 tmp36 = tmp10 - tmp28 tmp37 = tmp36 * tmp34 tmp38 = tmp35 - tmp37 tmp39 = tmp26 + tmp38 tmp41 = tl.sigmoid(tmp40) tmp45 = tmp43 + tmp44 tmp46 = tmp45 * tmp6 tmp47 = tmp42 * tmp46 tmp48 = tmp41 * tmp47 tmp49 = tmp10 - tmp41 tmp50 = tmp49 * tmp47 tmp51 = tmp48 - tmp50 tmp52 = tmp39 + tmp51 tl.store(out_ptr0 + (x2), tmp52, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4y/c4y7ef5neroioo45nhdxozozllc2bmzg27jz2uy5fyq34ehhexcn.py # Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_3, sub_2, mul_4, sub_3, pairwise_potential_E_1], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] # Source node to ATen node mapping: # W_sym => div_1 # add => add # mul_3 => mul_3 # mul_4 => mul_4 # pairwise_potential => mul # pairwise_potential_E_1 => sum_3 # sub_2 => sub_2 # sub_3 => sub_3 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %permute_2), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {}) # %mul : [num_users=20] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_4, %mul), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %permute_4), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %mul), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_3, [2], True), kwargs = {}) triton_poi_fused_add_div_mul_rsub_sub_sum_3 = async_compile.triton('triton_poi_fused_add_div_mul_rsub_sub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_rsub_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_rsub_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + (4*x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp17 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp32 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr2 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp46 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp47 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp51 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp52 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp7 = tmp5 + tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tmp4 * tmp9 tmp11 = tmp3 * tmp10 tmp12 = 1.0 tmp13 = tmp12 - tmp3 tmp14 = tmp13 * tmp10 tmp15 = tmp11 - tmp14 tmp18 = tmp16 + tmp17 tmp19 = tl.sigmoid(tmp18) tmp23 = tmp21 + tmp22 tmp24 = tmp23 * tmp8 tmp25 = tmp20 * tmp24 tmp26 = tmp19 * tmp25 tmp27 = tmp12 - tmp19 tmp28 = tmp27 * tmp25 tmp29 = tmp26 - tmp28 tmp30 = tmp15 + tmp29 tmp33 = tmp31 + tmp32 tmp34 = tl.sigmoid(tmp33) tmp38 = tmp36 + tmp37 tmp39 = tmp38 * tmp8 tmp40 = tmp35 * tmp39 tmp41 = tmp34 * tmp40 tmp42 = tmp12 - tmp34 tmp43 = tmp42 * tmp40 tmp44 = tmp41 - tmp43 tmp45 = tmp30 + tmp44 tmp48 = tmp46 + tmp47 tmp49 = tl.sigmoid(tmp48) tmp53 = tmp51 + tmp52 tmp54 = tmp53 * tmp8 tmp55 = tmp50 * tmp54 tmp56 = tmp49 * tmp55 tmp57 = tmp12 - tmp49 tmp58 = tmp57 * tmp55 tmp59 = tmp56 - tmp58 tmp60 = tmp45 + tmp59 tl.store(out_ptr0 + (x2), tmp60, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/lu/cluvgpnuehlfe66u3wqcj3n6cyvp6elikx7xp4egoya6c67hvs6p.py # Topologically Sorted Source Nodes: [logits_9], Original ATen: [aten.add] # Source node to ATen node mapping: # logits_9 => add_10 # Graph fragment: # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %sum_11), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import 
triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [feats_norm], Original ATen: [aten.linalg_vector_norm] stream0 = get_raw_stream(0) triton_poi_fused_linalg_vector_norm_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pairwise_norm], Original ATen: [aten.bmm] extern_kernels.bmm(buf0, reinterpret_tensor(buf0, (4, 1, 4), (4, 16, 1), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pairwise_dot], Original ATen: [aten.bmm] extern_kernels.bmm(primals_1, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), out=buf2) del primals_1 buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [pairwise_sim], Original ATen: [aten.div] triton_poi_fused_div_1.run(buf3, buf2, 64, grid=grid(64), stream=stream0) buf4 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_1, sub, mul_2, sub_1, pairwise_potential_E], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] triton_poi_fused_add_div_mul_rsub_sub_sum_2.run(primals_3, buf3, primals_2, buf4, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add, W_sym, 
pairwise_potential, mul_3, sub_2, mul_4, sub_3, pairwise_potential_E_1], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf4, buf3, primals_2, buf5, 16, grid=grid(16), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_5, sub_4, mul_6, sub_5, pairwise_potential_E_2], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf5, buf3, primals_2, buf6, 16, grid=grid(16), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_7, sub_6, mul_8, sub_7, pairwise_potential_E_3], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf6, buf3, primals_2, buf7, 16, grid=grid(16), stream=stream0) buf8 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_9, sub_8, mul_10, sub_9, pairwise_potential_E_4], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf7, buf3, primals_2, buf8, 16, grid=grid(16), stream=stream0) buf9 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_11, sub_10, mul_12, sub_11, pairwise_potential_E_5], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf8, buf3, primals_2, buf9, 16, grid=grid(16), stream=stream0) buf10 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_13, sub_12, mul_14, sub_13, pairwise_potential_E_6], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf9, buf3, primals_2, buf10, 16, grid=grid(16), stream=stream0) buf11 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_15, sub_14, mul_16, sub_15, pairwise_potential_E_7], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf10, buf3, primals_2, buf11, 16, grid=grid(16), stream=stream0) buf12 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_17, sub_16, mul_18, sub_17, pairwise_potential_E_8], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf11, buf3, primals_2, buf12, 16, grid=grid(16), stream=stream0) buf13 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_19, sub_18, mul_20, sub_19, pairwise_potential_E_9], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum] triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf12, buf3, primals_2, buf13, 16, grid=grid(16), stream=stream0) del buf12 buf14 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [logits_9], Original ATen: [aten.add] triton_poi_fused_add_4.run(primals_3, buf13, buf14, 64, grid=grid(64), stream=stream0) del buf13 return (buf14, primals_2, primals_3, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = 
rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
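The final fused kernel above (triton_poi_fused_add_4) broadcasts the accumulated pairwise expectation back onto the unary logits: x1 = xindex // 4 reads one element of buf13 per row of primals_3. A minimal eager sketch of the same broadcast add, with hypothetical values and the shapes used in `call` (illustration only, not part of this dataset row):

import torch

logits = torch.rand(4, 4, 4)      # primals_3: the unary potentials
pairwise_E = torch.rand(4, 4, 1)  # buf13: summed pairwise expectation
out = logits + pairwise_E         # buf14: broadcast over the last dim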
import torch from torch import nn class CRF(nn.Module): def __init__(self, num_nodes, iteration=10): """Initialize the CRF module Args: num_nodes: int, number of nodes/patches within the fully-connected CRF iteration: int, number of mean field iterations, e.g. 10 """ super(CRF, self).__init__() self.num_nodes = num_nodes self.iteration = iteration self.W = nn.Parameter(torch.zeros(1, num_nodes, num_nodes)) def forward(self, feats, logits): """Perform the CRF. Algorithm details are explained below: Within the paper, I formulate the CRF distribution using negative energy and cost, e.g. cosine distance, to derive pairwise potentials, following the convention in energy-based models. But for implementation simplicity, I use reward, e.g. cosine similarity, to derive pairwise potentials. So now, pairwise potentials encourage a high reward for assigning (y_i, y_j) the same label if (x_i, x_j) are similar, as measured by cosine similarity, pairwise_sim. For pairwise_potential_E = torch.sum( probs * pairwise_potential - (1 - probs) * pairwise_potential, dim=2, keepdim=True ) This takes the expectation of pairwise potentials using the current marginal distribution of each patch being tumor, i.e. probs. There are four cases to consider when taking the expectation between (i, j): 1. i=T,j=T; 2. i=N,j=T; 3. i=T,j=N; 4. i=N,j=N. probs is the marginal distribution of each i being tumor, therefore logits > 0 means tumor and logits < 0 means normal. Given this, the full expectation equation should be: [probs * +pairwise_potential] (case 1) + [(1 - probs) * +pairwise_potential] (case 2) + [probs * -pairwise_potential] (case 3) + [(1 - probs) * -pairwise_potential] (case 4). A positive sign rewards logits to be more tumor and a negative sign rewards logits to be more normal. But because of label compatibility, i.e. the indicator function within equation 3 in the paper, case 2 and case 3 are dropped, which ends up being: probs * pairwise_potential - (1 - probs) * pairwise_potential At a high level, if the (i, j) embeddings are different, then pairwise_potential, computed as cosine similarity, approaches 0 and has no effect anyway. If the (i, j) embeddings are similar, then pairwise_potential is a positive reward. In this case, if probs -> 1, then pairwise_potential promotes tumor probability; if probs -> 0, then -pairwise_potential promotes normal probability. Args: feats: 3D tensor with the shape of [batch_size, num_nodes, embedding_size], where num_nodes is the number of patches within a grid, e.g. 9 for a 3x3 grid; embedding_size is the size of the extracted feature representation for each patch from ResNet, e.g.
512 logits: 3D tensor with shape of [batch_size, num_nodes, 1], the logit of each patch within the grid being tumor before CRF Returns: logits: 3D tensor with shape of [batch_size, num_nodes, 1], the logit of each patch within the grid being tumor after CRF """ feats_norm = torch.norm(feats, p=2, dim=2, keepdim=True) pairwise_norm = torch.bmm(feats_norm, torch.transpose(feats_norm, 1, 2) ) pairwise_dot = torch.bmm(feats, torch.transpose(feats, 1, 2)) pairwise_sim = pairwise_dot / pairwise_norm W_sym = (self.W + torch.transpose(self.W, 1, 2)) / 2 pairwise_potential = pairwise_sim * W_sym unary_potential = logits.clone() for i in range(self.iteration): probs = torch.transpose(logits.sigmoid(), 1, 2) pairwise_potential_E = torch.sum(probs * pairwise_potential - ( 1 - probs) * pairwise_potential, dim=2, keepdim=True) logits = unary_potential + pairwise_potential_E return logits def __repr__(self): return 'CRF(num_nodes={}, iteration={})'.format(self.num_nodes, self.iteration) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'num_nodes': 4}]
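The label-compatibility simplification in the docstring above collapses the expectation to a single affine function of probs, since probs * P - (1 - probs) * P = (2 * probs - 1) * P. A minimal self-contained sketch verifying that identity for the expectation step (hypothetical shapes, illustration only):

import torch

batch_size, num_nodes = 2, 4  # hypothetical sizes
logits = torch.randn(batch_size, num_nodes, 1)
pairwise_potential = torch.randn(batch_size, num_nodes, num_nodes)

probs = torch.transpose(logits.sigmoid(), 1, 2)  # [B, 1, N]
expectation = torch.sum(probs * pairwise_potential - (1 - probs) *
    pairwise_potential, dim=2, keepdim=True)     # [B, N, 1]
closed_form = torch.sum((2 * probs - 1) * pairwise_potential,
    dim=2, keepdim=True)                         # algebraically identical
assert torch.allclose(expectation, closed_form, atol=1e-6)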
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tmp0 / tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_add_div_mul_rsub_sub_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp2 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp16 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp29 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp30 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp42 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp43 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp44 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp5 = tmp3 + tmp4 tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp8 = tmp2 * tmp7 tmp9 = tmp1 * tmp8 tmp10 = 1.0 tmp11 = tmp10 - tmp1 tmp12 = tmp11 * tmp8 tmp13 = tmp9 - tmp12 tmp15 = tl.sigmoid(tmp14) tmp19 = tmp17 + tmp18 tmp20 = tmp19 * tmp6 tmp21 = tmp16 * tmp20 tmp22 = tmp15 * tmp21 tmp23 = tmp10 - tmp15 tmp24 = tmp23 * tmp21 tmp25 = tmp22 - tmp24 tmp26 = tmp13 + tmp25 tmp28 = tl.sigmoid(tmp27) tmp32 = tmp30 + tmp31 tmp33 = tmp32 * tmp6 tmp34 = tmp29 * tmp33 tmp35 = tmp28 * tmp34 tmp36 = tmp10 - tmp28 tmp37 = tmp36 * tmp34 tmp38 = tmp35 - 
tmp37 tmp39 = tmp26 + tmp38 tmp41 = tl.sigmoid(tmp40) tmp45 = tmp43 + tmp44 tmp46 = tmp45 * tmp6 tmp47 = tmp42 * tmp46 tmp48 = tmp41 * tmp47 tmp49 = tmp10 - tmp41 tmp50 = tmp49 * tmp47 tmp51 = tmp48 - tmp50 tmp52 = tmp39 + tmp51 tl.store(out_ptr0 + x2, tmp52, xmask) @triton.jit def triton_poi_fused_add_div_mul_rsub_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp17 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp32 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp35 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp36 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp37 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last') tmp46 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp47 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp50 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp51 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp52 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp7 = tmp5 + tmp6 tmp8 = 0.5 tmp9 = tmp7 * tmp8 tmp10 = tmp4 * tmp9 tmp11 = tmp3 * tmp10 tmp12 = 1.0 tmp13 = tmp12 - tmp3 tmp14 = tmp13 * tmp10 tmp15 = tmp11 - tmp14 tmp18 = tmp16 + tmp17 tmp19 = tl.sigmoid(tmp18) tmp23 = tmp21 + tmp22 tmp24 = tmp23 * tmp8 tmp25 = tmp20 * tmp24 tmp26 = tmp19 * tmp25 tmp27 = tmp12 - tmp19 tmp28 = tmp27 * tmp25 tmp29 = tmp26 - tmp28 tmp30 = tmp15 + tmp29 tmp33 = tmp31 + tmp32 tmp34 = tl.sigmoid(tmp33) tmp38 = tmp36 + tmp37 tmp39 = tmp38 * tmp8 tmp40 = tmp35 * tmp39 tmp41 = tmp34 * tmp40 tmp42 = tmp12 - tmp34 tmp43 = tmp42 * tmp40 tmp44 = tmp41 - tmp43 tmp45 = tmp30 + tmp44 tmp48 = tmp46 + tmp47 tmp49 = tl.sigmoid(tmp48) tmp53 = tmp51 + tmp52 tmp54 = tmp53 * tmp8 tmp55 = tmp50 * tmp54 tmp56 = tmp49 * tmp55 tmp57 = tmp12 - tmp49 tmp58 = tmp57 * tmp55 tmp59 = tmp56 - tmp58 tmp60 = tmp45 + tmp59 tl.store(out_ptr0 + x2, tmp60, xmask) @triton.jit def triton_poi_fused_add_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_linalg_vector_norm_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf0, reinterpret_tensor(buf0, (4, 1, 4), (4, 16, 1), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(primals_1, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), out=buf2) del primals_1 buf3 = buf1 del buf1 triton_poi_fused_div_1[grid(64)](buf3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = buf0 del buf0 triton_poi_fused_add_div_mul_rsub_sub_sum_2[grid(16)](primals_3, buf3, primals_2, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3, buf4, buf3, primals_2, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3, buf5, buf3, primals_2, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3, buf6, buf3, primals_2, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = buf6 del buf6 triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3, buf7, buf3, primals_2, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3, buf8, buf3, primals_2, buf9, 16, XBLOCK=16, num_warps=1, num_stages=1) buf10 = buf8 del buf8 triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3, buf9, buf3, primals_2, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = buf9 del buf9 triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3, buf10, buf3, primals_2, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = buf10 del buf10 triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3, buf11, buf3, primals_2, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) buf13 = buf11 del buf11 triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3, buf12, buf3, primals_2, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf12 buf14 = buf2 del buf2 triton_poi_fused_add_4[grid(64)](primals_3, buf13, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf13 return buf14, primals_2, primals_3, buf3 class CRFNew(nn.Module): def __init__(self, num_nodes, iteration=10): """Initialize the CRF module Args: num_nodes: int, number of nodes/patches within the fully CRF iteration: int, number of mean field iterations, e.g. 
10 """ super(CRFNew, self).__init__() self.num_nodes = num_nodes self.iteration = iteration self.W = nn.Parameter(torch.zeros(1, num_nodes, num_nodes)) def __repr__(self): return 'CRF(num_nodes={}, iteration={})'.format(self.num_nodes, self.iteration) def forward(self, input_0, input_1): primals_2 = self.W primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0]
dradientgescent/NCRF
CRF
false
1879
[ "Apache-2.0" ]
0
21e95c0e0f965de2b1daa2d446306052b3703b6a
https://github.com/dradientgescent/NCRF/tree/21e95c0e0f965de2b1daa2d446306052b3703b6a
import torch from torch import nn class Model(nn.Module): def __init__(self, num_nodes, iteration=10): """Initialize the CRF module Args: num_nodes: int, number of nodes/patches within the fully CRF iteration: int, number of mean field iterations, e.g. 10 """ super().__init__() self.num_nodes = num_nodes self.iteration = iteration self.W = nn.Parameter(torch.zeros(1, num_nodes, num_nodes)) def forward(self, feats, logits): """Performing the CRF. Algorithm details is explained below: Within the paper, I formulate the CRF distribution using negative energy and cost, e.g. cosine distance, to derive pairwise potentials following the convention in energy based models. But for implementation simplicity, I use reward, e.g. cosine similarity to derive pairwise potentials. So now, pairwise potentials would encourage high reward for assigning (y_i, y_j) with the same label if (x_i, x_j) are similar, as measured by cosine similarity, pairwise_sim. For pairwise_potential_E = torch.sum( probs * pairwise_potential - (1 - probs) * pairwise_potential, dim=2, keepdim=True ) This is taking the expectation of pairwise potentials using the current marginal distribution of each patch being tumor, i.e. probs. There are four cases to consider when taking the expectation between (i, j): 1. i=T,j=T; 2. i=N,j=T; 3. i=T,j=N; 4. i=N,j=N probs is the marginal distribution of each i being tumor, therefore logits > 0 means tumor and logits < 0 means normal. Given this, the full expectation equation should be: [probs * +pairwise_potential] + [(1 - probs) * +pairwise_potential] + case 1 case 2 [probs * -pairwise_potential] + [(1 - probs) * -pairwise_potential] case 3 case 4 positive sign rewards logits to be more tumor and negative sign rewards logits to be more normal. But because of label compatibility, i.e. the indicator function within equation 3 in the paper, case 2 and case 3 are dropped, which ends up being: probs * pairwise_potential - (1 - probs) * pairwise_potential In high level speaking, if (i, j) embedding are different, then pairwise_potential, as computed as cosine similarity, would approach 0, which then as no affect anyway. if (i, j) embedding are similar, then pairwise_potential would be a positive reward. In this case, if probs -> 1, then pairwise_potential promotes tumor probability; if probs -> 0, then -pairwise_potential promotes normal probability. Args: feats: 3D tensor with the shape of [batch_size, num_nodes, embedding_size], where num_nodes is the number of patches within a grid, e.g. 9 for a 3x3 grid; embedding_size is the size of extracted feature representation for each patch from ResNet, e.g. 512 logits: 3D tensor with shape of [batch_size, num_nodes, 1], the logit of each patch within the grid being tumor before CRF Returns: logits: 3D tensor with shape of [batch_size, num_nodes, 1], the logit of each patch within the grid being tumor after CRF """ feats_norm = torch.norm(feats, p=2, dim=2, keepdim=True) pairwise_norm = torch.bmm(feats_norm, torch.transpose(feats_norm, 1, 2) ) pairwise_dot = torch.bmm(feats, torch.transpose(feats, 1, 2)) pairwise_sim = pairwise_dot / pairwise_norm W_sym = (self.W + torch.transpose(self.W, 1, 2)) / 2 pairwise_potential = pairwise_sim * W_sym unary_potential = logits.clone() for i in range(self.iteration): probs = torch.transpo # ... truncated (>4000 chars) for memory efficiency
L1DepthLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/vw/cvw56h5ri54hrna7duzfdrodishi5abxn5plg4lwgig6fevc7ryu.py # Topologically Sorted Source Nodes: [ne_1, labels_1s, labels_masked, predictions_masked], Original ATen: [aten.ne, aten._to_copy, aten.mul] # Source node to ATen node mapping: # labels_1s => convert_element_type_1 # labels_masked => mul_1 # ne_1 => ne_1 # predictions_masked => mul # Graph fragment: # %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg1_1, -1), kwargs = {}) # %convert_element_type_1 : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne_1, torch.float32), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %convert_element_type_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %convert_element_type_1), kwargs = {}) triton_poi_fused__to_copy_mul_ne_0 = async_compile.triton('triton_poi_fused__to_copy_mul_ne_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_mul_ne_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_mul_ne_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp5 = tl.load(in_ptr1 + (x0), xmask) tmp1 = -1.0 tmp2 = tmp0 != tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tmp0 * tmp3 tmp6 = tmp5 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr1 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4h/c4h2gxzjjtb6pgifnf4w3l34kbyjis7mca2psf5ba3iotwzkl2qr.py # Topologically Sorted Source Nodes: [ne, sum_1, total_sents, gt], Original ATen: [aten.ne, aten.sum, aten._to_copy, aten.gt] # Source node to ATen node mapping: # gt => gt # ne => ne # sum_1 => sum_1 # total_sents => convert_element_type # Graph fragment: # %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg0_1, 0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne,), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_1, torch.float32), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convert_element_type, 0), kwargs = {}) triton_per_fused__to_copy_gt_ne_sum_1 = async_compile.triton('triton_per_fused__to_copy_gt_ne_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_gt_ne_sum_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_gt_ne_sum_1(in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = 0.0 tmp2 = 
tmp0 != tmp1 tmp3 = tmp2.to(tl.int64) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = tmp6.to(tl.float32) tmp8 = tmp7 > tmp1 tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp7, None) tl.store(out_ptr2 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [ne_1, labels_1s, labels_masked, predictions_masked], Original ATen: [aten.ne, aten._to_copy, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_mul_ne_0.run(arg1_1, arg2_1, buf0, buf1, 256, grid=grid(256), stream=stream0) del arg1_1 del arg2_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = empty_strided_cuda((), (), torch.bool) # Topologically Sorted Source Nodes: [ne, sum_1, total_sents, gt], Original ATen: [aten.ne, aten.sum, aten._to_copy, aten.gt] triton_per_fused__to_copy_gt_ne_sum_1.run(arg0_1, buf3, buf4, 1, 256, grid=grid(1), stream=stream0) del arg0_1 return (buf0, buf1, buf3, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
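The persistent-reduction kernel above fuses the sentence count with its positivity gate in one pass over length_batch; an eager sketch of what buf3 and buf4 hold (same [4, 4, 4, 4] input, illustration only):

import torch

length_batch = torch.rand(4, 4, 4, 4)
total_sents = (length_batch != 0).sum().float()  # scalar written to buf3
gate = total_sents > 0                           # boolean written to buf4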
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class L1DepthLoss(nn.Module): """Custom L1 loss for depth sequences.""" def __init__(self, args): super(L1DepthLoss, self).__init__() self.args = args self.word_dim = 1 def forward(self, predictions, label_batch, length_batch): """ Computes L1 loss on depth sequences. Ignores all entries where label_batch=-1 Normalizes first within sentences (by dividing by the sentence length) and then across the batch. Args: predictions: A pytorch batch of predicted depths label_batch: A pytorch batch of true depths length_batch: A pytorch batch of sentence lengths Returns: A tuple of: batch_loss: average loss in the batch total_sents: number of sentences in the batch """ total_sents = torch.sum(length_batch != 0).float() labels_1s = (label_batch != -1).float() predictions_masked = predictions * labels_1s labels_masked = label_batch * labels_1s if total_sents > 0: loss_per_sent = torch.sum(torch.abs(predictions_masked - labels_masked), dim=self.word_dim) normalized_loss_per_sent = loss_per_sent / length_batch.float() batch_loss = torch.sum(normalized_loss_per_sent) / total_sents else: batch_loss = torch.tensor(0.0, device=self.args['device']) return batch_loss, total_sents def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'args': _mock_config()}]
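A small worked example of the masking and per-sentence normalization described in the docstring above (hypothetical values; positions labelled -1 are padding and must not contribute to the loss):

import torch

predictions = torch.tensor([[1.0, 2.0, 3.0]])
label_batch = torch.tensor([[1.0, 1.0, -1.0]])  # last position is padding
length_batch = torch.tensor([2.0])              # true sentence length

labels_1s = (label_batch != -1).float()         # mask: [[1., 1., 0.]]
diff = torch.abs(predictions * labels_1s - label_batch * labels_1s)
loss_per_sent = diff.sum(dim=1)                 # |1-1| + |2-1| = tensor([1.])
batch_loss = (loss_per_sent / length_batch).sum() / 1.0  # one sentence -> 0.5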
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_mul_ne_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp5 = tl.load(in_ptr1 + x0, xmask) tmp1 = -1.0 tmp2 = tmp0 != tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tmp0 * tmp3 tmp6 = tmp5 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp6, xmask) @triton.jit def triton_per_fused__to_copy_gt_ne_sum_1(in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 0.0 tmp2 = tmp0 != tmp1 tmp3 = tmp2.to(tl.int64) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = tmp6.to(tl.float32) tmp8 = tmp7 > tmp1 tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp7, None) tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy_mul_ne_0[grid(256)](arg1_1, arg2_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 del arg2_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = empty_strided_cuda((), (), torch.bool) triton_per_fused__to_copy_gt_ne_sum_1[grid(1)](arg0_1, buf3, buf4, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf0, buf1, buf3, buf4 class L1DepthLossNew(nn.Module): """Custom L1 loss for depth sequences.""" def __init__(self, args): super(L1DepthLossNew, self).__init__() self.args = args self.word_dim = 1 def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
AnReu/structural-probes
L1DepthLoss
false
1880
[ "Apache-2.0" ]
0
fdc99dc124fa6df3dbdd5ba48a90f08bb6bf37b7
https://github.com/AnReu/structural-probes/tree/fdc99dc124fa6df3dbdd5ba48a90f08bb6bf37b7
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class Model(nn.Module): """Custom L1 loss for depth sequences.""" def __init__(self, args): super().__init__() self.args = args self.word_dim = 1 def forward(self, predictions, label_batch, length_batch): """ Computes L1 loss on depth sequences. Ignores all entries where label_batch=-1 Normalizes first within sentences (by dividing by the sentence length) and then across the batch. Args: predictions: A pytorch batch of predicted depths label_batch: A pytorch batch of true depths length_batch: A pytorch batch of sentence lengths Returns: A tuple of: batch_loss: average loss in the batch total_sents: number of sentences in the batch """ total_sents = torch.sum(length_batch != 0).float() labels_1s = (label_batch != -1).float() predictions_masked = predictions * labels_1s labels_masked = label_batch * labels_1s if total_sents > 0: loss_per_sent = torch.sum(torch.abs(predictions_masked - labels_masked), dim=self.word_dim) normalized_loss_per_sent = loss_per_sent / length_batch.float() batch_loss = torch.sum(normalized_loss_per_sent) / total_sents else: batch_loss = torch.tensor(0.0, device=self.args['device']) return batch_loss, total_sents def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return []
CriticNetwork
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/sf/csfkqsknybmr6wr4qghsq2q5fkm44yw64vallsxwzclanv6zrtso.py # Topologically Sorted Source Nodes: [add, state_action_value], Original ATen: [aten.add, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # add => add # state_action_value => relu # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp9 = 0.0 tmp10 = tmp8 <= tmp9 tl.store(in_out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (1, 4), (4, 1)) assert_size_stride(primals_12, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [add, state_action_value], Original ATen: [aten.add, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_relu_threshold_backward_0.run(buf2, primals_2, buf1, primals_5, buf9, 256, grid=grid(256), stream=stream0) del primals_2 del primals_5 buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [add_1, state_action_value_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward] triton_poi_fused_add_relu_threshold_backward_0.run(buf5, primals_8, buf4, primals_10, buf8, 256, grid=grid(256), stream=stream0) del buf4 del primals_10 del primals_8 buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # 
Topologically Sorted Source Nodes: [state_action_value_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_12, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_12 return (reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0), primals_11, buf8, primals_9, primals_7, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
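The fused kernel above folds two bias adds, the elementwise state/action add, the ReLU, and the saved backward mask into a single pass; an eager sketch of the same computation on the [64, 4] views used in `call` (hypothetical tensors, illustration only):

import torch

x = torch.rand(64, 4)   # first linear stream (pre-bias), e.g. fc1(state)
b1 = torch.rand(4)      # its bias
y = torch.rand(64, 4)   # second linear stream (pre-bias)
b2 = torch.rand(4)      # its bias
out = torch.relu((x + b1) + (y + b2))  # buf2, updated in place
mask = out <= 0                        # buf9, kept for the ReLU backward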
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch as T class CriticNetwork(nn.Module): def __init__(self, beta, input_dims, fc1_dims, fc2_dims, n_actions, name): super(CriticNetwork, self).__init__() self.input_dims = input_dims self.fc1_dims = fc1_dims self.fc2_dims = fc2_dims self.n_actions = n_actions self.name = name self.fc1 = nn.Linear(self.input_dims, self.fc1_dims) self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims) self.action_value = nn.Linear(self.n_actions, self.fc1_dims) self.action_value2 = nn.Linear(self.fc1_dims, self.fc2_dims) self.q = nn.Linear(self.fc2_dims, 1) f1 = 1.0 / np.sqrt(self.fc1.weight.data.size()[0]) self.fc1.weight.data.uniform_(-f1, f1) self.fc1.bias.data.uniform_(-f1, f1) f2 = 1.0 / np.sqrt(self.fc2.weight.data.size()[0]) self.fc2.weight.data.uniform_(-f2, f2) self.fc2.bias.data.uniform_(-f2, f2) f3 = 0.003 self.q.weight.data.uniform_(-f3, f3) self.q.bias.data.uniform_(-f3, f3) f4 = 1.0 / np.sqrt(self.action_value.weight.data.size()[0]) self.action_value.weight.data.uniform_(-f4, f4) self.action_value.bias.data.uniform_(-f4, f4) self.optimizer = optim.Adam(self.parameters(), lr=beta, weight_decay=0.01) self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu') self def forward(self, state, action): state_value = self.fc1(state) action_value = self.action_value(action) state_action_value = F.relu(T.add(state_value, action_value)) state_action_value = F.relu(T.add(self.fc2(state_action_value), self.action_value2(state_action_value))) state_action_value = self.q(state_action_value) return state_action_value def save_checkpoint(self, chkpt_dir='tmp/ddpg'): None checkpoint_file = chkpt_dir + '/' + str(self.name + '_ddpg.pt') T.save(self.state_dict(), checkpoint_file) def load_checkpoint(self, chkpt_dir='tmp/ddpg'): None checkpoint_file = chkpt_dir + '/' + str(self.name + '_ddpg.pt') self.load_state_dict(T.load(checkpoint_file)) def save_best(self, chkpt_dir='best/ddpg'): None checkpoint_file = chkpt_dir + '/' + str(self.name + '_ddpg.pt') T.save(self.state_dict(), checkpoint_file) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'beta': 4, 'input_dims': 4, 'fc1_dims': 4, 'fc2_dims': 4, 'n_actions': 4, 'name': 4}]
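A minimal eager forward sketch for the critic above, using the 4-dim setup from get_init_inputs (hypothetical batch; the class definition above is assumed to be in scope):

import torch

net = CriticNetwork(beta=0.001, input_dims=4, fc1_dims=4, fc2_dims=4,
    n_actions=4, name='critic')
state = torch.rand(8, 4)
action = torch.rand(8, 4)
q = net(state, action)  # relu(fc1(s) + action_value(a)) -> second fusion -> q
print(q.shape)          # torch.Size([8, 1]): one scalar Q-value per (s, a) pair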
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn import torch.optim as optim import torch as T assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp9 = 0.0 tmp10 = tmp8 <= tmp9 tl.store(in_out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (1, 4), (4, 1)) assert_size_stride(primals_12, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_threshold_backward_0[grid(256)](buf2, primals_2, buf1, primals_5, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_5 buf3 = buf1 del buf1 extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_0[grid(256)](buf5, primals_8, buf4, primals_10, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del primals_10 del primals_8 buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) 
extern_kernels.addmm(primals_12, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf7) del primals_12 return reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor( buf5, (64, 4), (4, 1), 0), primals_11, buf8, primals_9, primals_7, buf9 class CriticNetworkNew(nn.Module): def __init__(self, beta, input_dims, fc1_dims, fc2_dims, n_actions, name): super(CriticNetworkNew, self).__init__() self.input_dims = input_dims self.fc1_dims = fc1_dims self.fc2_dims = fc2_dims self.n_actions = n_actions self.name = name self.fc1 = nn.Linear(self.input_dims, self.fc1_dims) self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims) self.action_value = nn.Linear(self.n_actions, self.fc1_dims) self.action_value2 = nn.Linear(self.fc1_dims, self.fc2_dims) self.q = nn.Linear(self.fc2_dims, 1) f1 = 1.0 / np.sqrt(self.fc1.weight.data.size()[0]) self.fc1.weight.data.uniform_(-f1, f1) self.fc1.bias.data.uniform_(-f1, f1) f2 = 1.0 / np.sqrt(self.fc2.weight.data.size()[0]) self.fc2.weight.data.uniform_(-f2, f2) self.fc2.bias.data.uniform_(-f2, f2) f3 = 0.003 self.q.weight.data.uniform_(-f3, f3) self.q.bias.data.uniform_(-f3, f3) f4 = 1.0 / np.sqrt(self.action_value.weight.data.size()[0]) self.action_value.weight.data.uniform_(-f4, f4) self.action_value.bias.data.uniform_(-f4, f4) self.optimizer = optim.Adam(self.parameters(), lr=beta, weight_decay=0.01) self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu') self def save_checkpoint(self, chkpt_dir='tmp/ddpg'): None checkpoint_file = chkpt_dir + '/' + str(self.name + '_ddpg.pt') T.save(self.state_dict(), checkpoint_file) def load_checkpoint(self, chkpt_dir='tmp/ddpg'): None checkpoint_file = chkpt_dir + '/' + str(self.name + '_ddpg.pt') self.load_state_dict(T.load(checkpoint_file)) def save_best(self, chkpt_dir='best/ddpg'): None checkpoint_file = chkpt_dir + '/' + str(self.name + '_ddpg.pt') T.save(self.state_dict(), checkpoint_file) def forward(self, input_0, input_1): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_7 = self.action_value.weight primals_8 = self.action_value.bias primals_9 = self.action_value2.weight primals_10 = self.action_value2.bias primals_11 = self.q.weight primals_12 = self.q.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
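A usage sketch for the compiled wrapper above, assuming a CUDA device (the `call` guards pin cuda:0 and [4, 4, 4, 4] inputs); shown without an equivalence assertion, since the primals-to-layer mapping is the wrapper's own:

import torch

critic = CriticNetworkNew(beta=0.001, input_dims=4, fc1_dims=4, fc2_dims=4,
    n_actions=4, name='critic').cuda()
state = torch.rand(4, 4, 4, 4, device='cuda')
action = torch.rand(4, 4, 4, 4, device='cuda')
q = critic(state, action)
print(q.shape)  # torch.Size([4, 4, 4, 1])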
Yang2581/Behavioral-Cloning
CriticNetwork
false
1881
[ "MIT" ]
0
426e68a639e3e341f5547cfe40fb03ed8e87f3c8
https://github.com/Yang2581/Behavioral-Cloning/tree/426e68a639e3e341f5547cfe40fb03ed8e87f3c8
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch as T class Model(nn.Module): def __init__(self, beta, input_dims, fc1_dims, fc2_dims, n_actions, name): super().__init__() self.input_dims = input_dims self.fc1_dims = fc1_dims self.fc2_dims = fc2_dims self.n_actions = n_actions self.name = name self.fc1 = nn.Linear(self.input_dims, self.fc1_dims) self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims) self.action_value = nn.Linear(self.n_actions, self.fc1_dims) self.action_value2 = nn.Linear(self.fc1_dims, self.fc2_dims) self.q = nn.Linear(self.fc2_dims, 1) f1 = 1.0 / np.sqrt(self.fc1.weight.data.size()[0]) self.fc1.weight.data.uniform_(-f1, f1) self.fc1.bias.data.uniform_(-f1, f1) f2 = 1.0 / np.sqrt(self.fc2.weight.data.size()[0]) self.fc2.weight.data.uniform_(-f2, f2) self.fc2.bias.data.uniform_(-f2, f2) f3 = 0.003 self.q.weight.data.uniform_(-f3, f3) self.q.bias.data.uniform_(-f3, f3) f4 = 1.0 / np.sqrt(self.action_value.weight.data.size()[0]) self.action_value.weight.data.uniform_(-f4, f4) self.action_value.bias.data.uniform_(-f4, f4) self.optimizer = optim.Adam(self.parameters(), lr=beta, weight_decay=0.01) self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu') self def forward(self, state, action): state_value = self.fc1(state) action_value = self.action_value(action) state_action_value = F.relu(T.add(state_value, action_value)) state_action_value = F.relu(T.add(self.fc2(state_action_value), self.action_value2(state_action_value))) state_action_value = self.q(state_action_value) return state_action_value def save_checkpoint(self, chkpt_dir='tmp/ddpg'): None checkpoint_file = chkpt_dir + '/' + str(self.name + '_ddpg.pt') T.save(self.state_dict(), checkpoint_file) def load_checkpoint(self, chkpt_dir='tmp/ddpg'): None checkpoint_file = chkpt_dir + '/' + str(self.name + '_ddpg.pt') self.load_state_dict(T.load(checkpoint_file)) def save_best(self, chkpt_dir='best/ddpg'): None checkpoint_file = chkpt_dir + '/' + str(self.name + '_ddpg.pt') T.save(self.state_dict(), checkpoint_file) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'beta': 4, 'input_dims': 4, 'fc1_dims': 4, 'fc2_dims': 4, 'n_actions': 4, 'name': 4}]
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/zv/czvfpj3ah2lefbwpcuw4esv23bxs5a3ab63ply3ntgbsdktepd5v.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 784) % 6 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/v7/cv7qi7gg3bpfwb3hj7zgy5jlgh7x7wdgqsfsodkjsoverxdjlf6z.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = (xindex // 14) x2 = (xindex // 1176) x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, xmask) 
tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/xe/cxelxvpw3asckozc53rh36773aohp5hqpbp2nos5ymcdqhxvo4bl.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 100) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tn/ctnw4tbgfy47ppke77vu7rtiz7dl5o3ahickx4p64n7c5rmrrix6.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/jn/cjnqv3sgcv5x2iz7ij5zdad6ofabcnonrlksgsxu2ob7n274gz6b.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_3 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/6m/c6m6u2ctjb4r4ra3sizrwezzkzegfp2ombflmfg3dwjfci2pen7h.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_4 => relu_4 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_2, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_3, (6, ), (1, )) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120, ), (1, )) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84, ), (1, )) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 18816, grid=grid(18816), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 4704, grid=grid(4704), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 6400, grid=grid(6400), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 480, grid=grid(480), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] 
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu] triton_poi_fused_relu_5.run(buf11, primals_9, 336, grid=grid(336), stream=stream0) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_2, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((120, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((10, 84), (84, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
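As a quick sanity check on the module above, a minimal smoke test is sketched below; the input shape mirrors get_inputs() and the script is illustrative only, not part of the original repository:

import torch

# Hypothetical smoke test for Net (not from the source repo).
# conv1 (5x5): 32 -> 28, pool: 28 -> 14, conv2 (5x5): 14 -> 10, pool: 10 -> 5,
# flatten: 16 * 5 * 5 = 400, fc1 -> 120, fc2 -> 84, fc3 -> 10.
net = Net()
x = torch.rand(4, 3, 32, 32)  # same shape as get_inputs()
out = net(x)
assert out.shape == (4, 10)  # one 10-way logit vector per sample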
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 18816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 6 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x3 = xindex // 14 x2 = xindex // 1176 x4 = xindex % 1176 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, 
tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_2, (6, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_3, (6,), (1,)) assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (120, 400), (400, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_3, 18816, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch .float32) buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2, buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5, 6400, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6, buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(480)](buf9, 
primals_7, 480, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK= 128, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_2, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6) class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, input_0): primals_2 = self.conv1.weight primals_3 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
The-very-most-awesome-team-of-cool-kids/02463_Active_Learning
Net
false
1882
[ "MIT" ]
0
abc35a31996de1c2e3275cf946b6a44f62abb781
https://github.com/The-very-most-awesome-team-of-cool-kids/02463_Active_Learning/tree/abc35a31996de1c2e3275cf946b6a44f62abb781
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): def __init__(self): super().__init__() self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return []
BertOutput
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ai/cai32p2ssjvpyulvuzcicdszqe3thbavgxn4jeed6uatjnl7yq2s.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 
tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/dt/cdtfyiepqseugx5m3udiopa26uo6fdp2fyvmfcoipxuqyqqnb2l6.py # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # hidden_states_2 => add_1, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/mn/cmntyljhuirhsdjg2yosgzllpkpxqedxgoyk6gunquq2rf3kl7u5.py # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # hidden_states_2 => add_1, 
add_2, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {}) triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf1, primals_2, primals_4, 256, grid=grid(256), stream=stream0) del primals_2 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_5, primals_6, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del primals_6 return (buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.utils.checkpoint class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
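A minimal sketch of exercising this block end to end, using the same _mock_config arguments as get_init_inputs above (the snippet is an assumed illustration, not code from the source repo):

from _paritybench_helpers import _mock_config
import torch

# Config values copied from get_init_inputs() above.
config = _mock_config(intermediate_size=4, hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)
layer = BertOutput(config)
layer.eval()  # disable dropout so dense -> residual add -> LayerNorm is deterministic
hidden_states = torch.rand(4, 4, 4, 4)  # matches get_inputs()
input_tensor = torch.rand(4, 4, 4, 4)
out = layer(hidden_states, input_tensor)
assert out.shape == (4, 4, 4, 4)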
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_2, primals_4, 256, 
XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_6 return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1 class BertOutputNew(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_0, input_1): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_5 = self.LayerNorm.weight primals_6 = self.LayerNorm.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
MikeWangWZHL/BLIP
BertOutput
false
1883
[ "BSD-3-Clause" ]
0
b82134f1892a54c8f63b0f4b51bdcb8684e1dc6d
https://github.com/MikeWangWZHL/BLIP/tree/b82134f1892a54c8f63b0f4b51bdcb8684e1dc6d
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.utils.checkpoint class Model(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
PixelNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/uq/cuqlfvqfeznifrvz7odrw3ezlfid2vgqb7wezw6nc6yrrg5447bi.py # Topologically Sorted Source Nodes: [norm, sqrt, norm_1, add, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.sqrt, aten.div, aten.add] # Source node to ATen node mapping: # add => add # norm => pow_1, pow_2, sum_1 # norm_1 => div # sqrt => full_default # truediv_1 => div_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, %full_default), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1e-06), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %add), kwargs = {}) triton_poi_fused_add_div_linalg_vector_norm_sqrt_0 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_sqrt_0', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_linalg_vector_norm_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 0.5 tmp14 = tmp12 * tmp13 tmp15 = 1e-06 tmp16 = tmp14 + tmp15 tmp17 = tmp0 / tmp16 tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, sqrt, norm_1, add, truediv_1], Original ATen: [aten.linalg_vector_norm, aten.sqrt, aten.div, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_div_linalg_vector_norm_sqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.cpp_extension def pixel_norm(x, eps=1e-06): """Pixel Normalization. This normalization is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Args: x (torch.Tensor): Tensor to be normalized. eps (float, optional): Epsilon to avoid dividing zero. Defaults to 1e-6. Returns: torch.Tensor: Normalized tensor. """ if torch.__version__ >= '1.7.0': norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True) else: norm = torch.norm(x, p=2, dim=1, keepdim=True) norm = norm / torch.sqrt(torch.tensor(x.shape[1])) return x / (norm + eps) class PixelNorm(nn.Module): """Pixel Normalization. This module is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Args: eps (float, optional): Epsilon value. Defaults to 1e-6. """ _abbr_ = 'pn' def __init__(self, in_channels=None, eps=1e-06): super().__init__() self.eps = eps def forward(self, x): """Forward function. Args: x (torch.Tensor): Tensor to be normalized. Returns: torch.Tensor: Normalized tensor. """ return pixel_norm(x, self.eps) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
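The docstring above amounts to x / (||x||_2 over dim 1 / sqrt(C) + eps); a small sketch checking pixel_norm against that formula written out by hand (illustrative only):

import torch

# Manual re-derivation of pixel_norm for a C = 4 input (assumed example).
x = torch.rand(4, 4, 4, 4)
eps = 1e-06
norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True)  # L2 norm over channels
manual = x / (norm / torch.sqrt(torch.tensor(4.0)) + eps)  # sqrt(C) with C = 4
assert torch.allclose(pixel_norm(x, eps), manual)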
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.cpp_extension

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_sqrt_0(in_ptr0, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 0.5
    tmp14 = tmp12 * tmp13
    tmp15 = 1e-06
    tmp16 = tmp14 + tmp15
    tmp17 = tmp0 / tmp16
    tl.store(out_ptr0 + x3, tmp17, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_linalg_vector_norm_sqrt_0[grid(256)](arg0_1,
            buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


def pixel_norm(x, eps=1e-06):
    """Pixel Normalization.

    This normalization is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        x (torch.Tensor): Tensor to be normalized.
        eps (float, optional): Epsilon to avoid dividing zero.
            Defaults to 1e-6.

    Returns:
        torch.Tensor: Normalized tensor.
    """
    if torch.__version__ >= '1.7.0':
        norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True)
    else:
        norm = torch.norm(x, p=2, dim=1, keepdim=True)
    norm = norm / torch.sqrt(torch.tensor(x.shape[1]))
    return x / (norm + eps)


class PixelNormNew(nn.Module):
    """Pixel Normalization.

    This module is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        eps (float, optional): Epsilon value. Defaults to 1e-6.
    """
    _abbr_ = 'pn'

    def __init__(self, in_channels=None, eps=1e-06):
        super().__init__()
        self.eps = eps

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
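A hedged equivalence check between the eager module and the compiled one. It assumes a CUDA build (call pins device 0), that PixelNorm from the python_code field above is importable in the same session, and the (4, 4, 4, 4) shape the kernel was specialized for:

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = PixelNorm()(x)     # eager path from the python_code field
new = PixelNormNew()(x)  # runs the fused Triton kernel via call()
torch.testing.assert_close(new, ref)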
bladesaber/mmgeneration
PixelNorm
false
1884
[ "Apache-2.0" ]
0
158b49f7efd8028f231f6e9ca758ae0e20dd72ae
https://github.com/bladesaber/mmgeneration/tree/158b49f7efd8028f231f6e9ca758ae0e20dd72ae
import torch
import torch.nn as nn
import torch.utils.cpp_extension


def pixel_norm(x, eps=1e-06):
    """Pixel Normalization.

    This normalization is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        x (torch.Tensor): Tensor to be normalized.
        eps (float, optional): Epsilon to avoid dividing zero.
            Defaults to 1e-6.

    Returns:
        torch.Tensor: Normalized tensor.
    """
    if torch.__version__ >= '1.7.0':
        norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True)
    else:
        norm = torch.norm(x, p=2, dim=1, keepdim=True)
    norm = norm / torch.sqrt(torch.tensor(x.shape[1]))
    return x / (norm + eps)


class Model(nn.Module):
    """Pixel Normalization.

    This module is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        eps (float, optional): Epsilon value. Defaults to 1e-6.
    """
    _abbr_ = 'pn'

    def __init__(self, in_channels=None, eps=1e-06):
        super().__init__()
        self.eps = eps

    def forward(self, x):
        """Forward function.

        Args:
            x (torch.Tensor): Tensor to be normalized.

        Returns:
            torch.Tensor: Normalized tensor.
        """
        return pixel_norm(x, self.eps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
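Note that this pytorch_code variant returns a bare [] from get_init_inputs, while the python_code variant above returns [[], {}] (positional args plus kwargs). A sketch of how a benchmarking harness might consume both shapes; the build_model helper and its unpacking convention are assumptions for illustration, not part of the dataset:

def build_model(model_cls, init_inputs):
    # Hypothetical helper: accept either the [args, kwargs] shape or a flat arg list.
    if (len(init_inputs) == 2 and isinstance(init_inputs[0], list)
            and isinstance(init_inputs[1], dict)):
        args, kwargs = init_inputs
        return model_cls(*args, **kwargs)
    return model_cls(*init_inputs)

model = build_model(Model, get_init_inputs())  # here: Model() with no arguments
out = model(*get_inputs())
print(out.shape)  # torch.Size([4, 4, 4, 4])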
EncoderBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # out => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # out => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/se/csebvp3bmolgy5c7lrzs2aipfus76vmgeh3xk3enrbbgzf5oq3nn.py # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.native_layer_norm, aten.repeat] # Source node to ATen node mapping: # out => add, add_1, mul, mul_1, rsqrt, sub, var_mean # out_1 => repeat # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%add_1, [1, 1, 4]), kwargs = {}) triton_poi_fused_native_layer_norm_repeat_2 = async_compile.triton('triton_poi_fused_native_layer_norm_repeat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((4*x1) + (x0 % 4)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/3q/c3qlo5h2a7hopa2crhkn6mzwhkfjo6dvlrxttnheh77ukhu2soye.py # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # q_1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x4), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/2g/c2gbluyeyqil45hjfgkpafvl2dgu3xy73v7dkyzz3d2ktbsbjcnx.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = 
call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x2), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4p/c4p7nmmaeg3do3k4aqmp6mh43rupubtod4kype3ux64cxp3b3uub.py # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # y_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/pw/cpw6jeu3vr4erot3bahfr3gqwa2vqailcc5xbtuiztd5l6w2zwjm.py # Topologically Sorted Source Nodes: [attention_1, out2], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # attention_1 => add_2 # out2 => var_mean_1 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_21, %primals_3), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + 
tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/hy/chy77uudjf33jqcobrq24clniixoyn2vucadggy6c6n33mq4favb.py # Topologically Sorted Source Nodes: [attention_1, out2], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # attention_1 => add_2 # out2 => add_3, add_4, mul_2, mul_3, rsqrt_1, sub_2 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_21, %primals_3), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_12), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_13), kwargs = {}) triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = 
tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/bx/cbxzdkbxnjj4ysq7xrysng3xgzgpomqcvqe5mdc477yiqcwvtclw.py # Topologically Sorted Source Nodes: [attention_1, out3_1], Original ATen: [aten.add] # Source node to ATen node mapping: # attention_1 => add_2 # out3_1 => add_5 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_21, %primals_3), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_23, %add_2), kwargs = {}) triton_poi_fused_add_9 = async_compile.triton('triton_poi_fused_add_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) 
assert_size_stride(primals_4, (16, 16), (16, 1)) assert_size_stride(primals_5, (16, ), (1, )) assert_size_stride(primals_6, (16, 16), (16, 1)) assert_size_stride(primals_7, (16, ), (1, )) assert_size_stride(primals_8, (16, 16), (16, 1)) assert_size_stride(primals_9, (16, ), (1, )) assert_size_stride(primals_10, (4, 16), (16, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.native_layer_norm, aten.repeat] triton_poi_fused_native_layer_norm_repeat_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0) buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (16, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf4) buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (16, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 16), (1, 16), 0), out=buf5) buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (16, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 16), (1, 16), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf4, primals_5, buf7, 256, grid=grid(256), stream=stream0) del primals_5 buf8 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [k_1], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf5, primals_7, buf8, 256, grid=grid(256), stream=stream0) del primals_7 buf9 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), out=buf9) buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf9, buf10, 256, grid=grid(256), stream=stream0) buf11 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf10, buf11, 256, grid=grid(256), stream=stream0) buf12 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0); del 
buf10 # reuse # Topologically Sorted Source Nodes: [v_1], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf6, primals_9, buf12, 256, grid=grid(256), stream=stream0) del primals_9 buf13 = reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [y], Original ATen: [aten.bmm] extern_kernels.bmm(buf11, reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0), out=buf13) buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf13, buf14, 256, grid=grid(256), stream=stream0) del buf13 buf15 = reinterpret_tensor(buf2, (16, 4), (4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, reinterpret_tensor(buf14, (16, 16), (16, 1), 0), reinterpret_tensor(primals_10, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf15) del primals_11 buf16 = buf1; del buf1 # reuse buf17 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attention_1, out2], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_7.run(buf15, primals_3, buf16, buf17, 16, grid=grid(16), stream=stream0) buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_1, out2], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_8.run(buf15, primals_3, buf16, buf17, primals_12, primals_13, buf18, 64, grid=grid(64), stream=stream0) del buf16 del buf17 del primals_13 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse # Topologically Sorted Source Nodes: [attention_1, out3_1], Original ATen: [aten.add] triton_poi_fused_add_9.run(buf20, primals_15, buf15, primals_3, 64, grid=grid(64), stream=stream0) del primals_15 return (buf20, primals_3, primals_12, reinterpret_tensor(buf3, (16, 16), (16, 1), 0), buf11, reinterpret_tensor(buf14, (16, 16), (16, 1), 0), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), primals_14, primals_10, reinterpret_tensor(buf12, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((16, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
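The triton_poi_fused__softmax_4 / _5 pair above is a numerically stable softmax with the attention scale folded into the exponent: kernel 4 stores exp((s - max(s)) * 0.5), where 0.5 is 1 / sqrt(dk) for the per-head dimension dk = 16 / 4 = 4, and kernel 5 divides by the row sum. A standalone PyTorch check that this equals softmax(s / 2):

import torch
import torch.nn.functional as F

s = torch.randn(16, 4, 4)                                     # raw q @ k^T scores (batch*heads, seq, seq)
stable = torch.exp((s - s.amax(dim=-1, keepdim=True)) * 0.5)  # what kernel 4 stores
attn = stable / stable.sum(dim=-1, keepdim=True)              # what kernel 5 stores
torch.testing.assert_close(attn, F.softmax(s * 0.5, dim=-1))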
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class ScaledDotProductAttention(nn.Module):

    def forward(self, query, key, value, mask=None):
        dk = query.size()[-1]
        scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk)
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1000000000.0)
        attention = F.softmax(scores, dim=-1)
        return attention.matmul(value)


class MultiHeadAttention(nn.Module):

    def __init__(self, in_features, head_num, bias=True, activation=None):
        super(MultiHeadAttention, self).__init__()
        if in_features % head_num != 0:
            raise ValueError(
                '`in_features`({}) should be divisible by `head_num`({})'.
                format(in_features, head_num))
        self.in_features = in_features
        self.head_num = head_num
        self.activation = activation
        self.bias = bias
        self.linear_q = nn.Linear(in_features, in_features, bias)
        self.linear_k = nn.Linear(in_features, in_features, bias)
        self.linear_v = nn.Linear(in_features, in_features, bias)
        self.linear_o = nn.Linear(in_features, in_features // self.head_num,
            bias)

    def forward(self, q, k, v, mask=None):
        q, k, v = self.linear_q(q), self.linear_k(k), self.linear_v(v)
        if self.activation is not None:
            q = self.activation(q)
            k = self.activation(k)
            v = self.activation(v)
        q = self._reshape_to_batches(q)
        k = self._reshape_to_batches(k)
        v = self._reshape_to_batches(v)
        if mask is not None:
            mask = mask.repeat(self.head_num, 1, 1)
        y = ScaledDotProductAttention()(q, k, v, mask)
        y = self._reshape_from_batches(y)
        y = self.linear_o(y)
        if self.activation is not None:
            y = self.activation(y)
        return y

    @staticmethod
    def gen_history_mask(x):
        """Generate the mask that only uses history data.

        :param x: Input tensor.
        :return: The mask.
        """
        batch_size, seq_len, _ = x.size()
        return torch.tril(torch.ones(seq_len, seq_len)).view(1, seq_len,
            seq_len).repeat(batch_size, 1, 1)

    def _reshape_to_batches(self, x):
        batch_size, seq_len, in_feature = x.size()
        sub_dim = in_feature // self.head_num
        return x.reshape(batch_size, seq_len, self.head_num, sub_dim).permute(
            0, 2, 1, 3).reshape(batch_size * self.head_num, seq_len, sub_dim)

    def _reshape_from_batches(self, x):
        batch_size, seq_len, in_feature = x.size()
        batch_size //= self.head_num
        out_dim = in_feature * self.head_num
        return x.reshape(batch_size, self.head_num, seq_len, in_feature
            ).permute(0, 2, 1, 3).reshape(batch_size, seq_len, out_dim)

    def extra_repr(self):
        return 'in_features={}, head_num={}, bias={}, activation={}'.format(
            self.in_features, self.head_num, self.bias, self.activation)


class EncoderBlock(nn.Module):

    def __init__(self, embed_size, heads):
        super(EncoderBlock, self).__init__()
        self.heads = heads
        self.norm1 = nn.LayerNorm(embed_size)
        self.attention = MultiHeadAttention(embed_size * heads, head_num=
            heads, bias=True, activation=None)
        self.norm2 = nn.LayerNorm(embed_size)
        self.mlp = nn.Linear(embed_size, embed_size)

    def forward(self, x):
        out = self.norm1(x)
        out = out.repeat(1, 1, self.heads)
        attention = self.attention(out, out, out, mask=None)
        attention = attention + x
        out2 = self.norm2(attention)
        out3 = self.mlp(out2)
        out3 = out3 + attention
        return out3


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'embed_size': 4, 'heads': 4}]
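The block keeps the model dimension fixed: the norm1 output is repeated heads times (4 -> 16) before attention, and linear_o projects back down (16 -> 4), so both residual adds line up with x. A quick shape check using the helpers above:

block = EncoderBlock(embed_size=4, heads=4)
x, = get_inputs()
print(block(x).shape)  # torch.Size([4, 4, 4]), same as the input, as both residuals require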
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_native_layer_norm_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 * x1 + x0 % 4), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x4, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.5 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x2, tmp17, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, 
eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (16, 16), (16, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (16, 16), (16, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (16, 16), (16, 1)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (4, 16), (16, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4), (4, 1)) assert_size_stride(primals_15, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_native_layer_norm_repeat_2[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (16, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 16), (1, 16), 0), out=buf4) buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (16, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 16), (1, 16), 0), out=buf5) buf6 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (16, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 16), (1, 16), 0), out=buf6) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(256)](buf4, primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf8 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_clone_3[grid(256)](buf5, primals_7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0) del buf5 
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), out=buf9) buf10 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) buf11 = buf9 del buf9 triton_poi_fused__softmax_5[grid(256)](buf10, buf11, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf10 triton_poi_fused_clone_3[grid(256)](buf6, primals_9, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf13 = reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0) del buf6 extern_kernels.bmm(buf11, reinterpret_tensor(buf12, (16, 4, 4), (16, 4, 1), 0), out=buf13) buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_6[grid(256)](buf13, buf14, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf13 buf15 = reinterpret_tensor(buf2, (16, 4), (4, 1), 0) del buf2 extern_kernels.addmm(primals_11, reinterpret_tensor(buf14, (16, 16), (16, 1), 0), reinterpret_tensor(primals_10, (16, 4), (1, 16), 0 ), alpha=1, beta=1, out=buf15) del primals_11 buf16 = buf1 del buf1 buf17 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_7[grid(16)](buf15, primals_3, buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_8[grid(64)](buf15, primals_3, buf16, buf17, primals_12, primals_13, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf16 del buf17 del primals_13 buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 triton_poi_fused_add_9[grid(64)](buf20, primals_15, buf15, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_15 return buf20, primals_3, primals_12, reinterpret_tensor(buf3, (16, 16), (16, 1), 0), buf11, reinterpret_tensor(buf14, (16, 16), (16, 1), 0 ), buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0 ), primals_14, primals_10, reinterpret_tensor(buf12, (16, 4, 4), ( 16, 1, 4), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0 ), primals_8, primals_6, primals_4 class ScaledDotProductAttention(nn.Module): def forward(self, query, key, value, mask=None): dk = query.size()[-1] scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) attention = F.softmax(scores, dim=-1) return attention.matmul(value) class MultiHeadAttention(nn.Module): def __init__(self, in_features, head_num, bias=True, activation=None): super(MultiHeadAttention, self).__init__() if in_features % head_num != 0: raise ValueError( '`in_features`({}) should be divisible by `head_num`({})'. 
format(in_features, head_num)) self.in_features = in_features self.head_num = head_num self.activation = activation self.bias = bias self.linear_q = nn.Linear(in_features, in_features, bias) self.linear_k = nn.Linear(in_features, in_features, bias) self.linear_v = nn.Linear(in_features, in_features, bias) self.linear_o = nn.Linear(in_features, in_features // self.head_num, bias) def forward(self, q, k, v, mask=None): q, k, v = self.linear_q(q), self.linear_k(k), self.linear_v(v) if self.activation is not None: q = self.activation(q) k = self.activation(k) v = self.activation(v) q = self._reshape_to_batches(q) k = self._reshape_to_batches(k) v = self._reshape_to_batches(v) if mask is not None: mask = mask.repeat(self.head_num, 1, 1) y = ScaledDotProductAttention()(q, k, v, mask) y = self._reshape_from_batches(y) y = self.linear_o(y) if self.activation is not None: y = self.activation(y) return y @staticmethod def gen_history_mask(x): """Generate the mask that only uses history data. :param x: Input tensor. :return: The mask. """ batch_size, seq_len, _ = x.size() return torch.tril(torch.ones(seq_len, seq_len)).view(1, seq_len, seq_len).repeat(batch_size, 1, 1) def _reshape_to_batches(self, x): batch_size, seq_len, in_feature = x.size() sub_dim = in_feature // self.head_num return x.reshape(batch_size, seq_len, self.head_num, sub_dim).permute( 0, 2, 1, 3).reshape(batch_size * self.head_num, seq_len, sub_dim) def _reshape_from_batches(self, x): batch_size, seq_len, in_feature = x.size() batch_size //= self.head_num out_dim = in_feature * self.head_num return x.reshape(batch_size, self.head_num, seq_len, in_feature ).permute(0, 2, 1, 3).reshape(batch_size, seq_len, out_dim) def extra_repr(self): return 'in_features={}, head_num={}, bias={}, activation={}'.format( self.in_features, self.head_num, self.bias, self.activation) class EncoderBlockNew(nn.Module): def __init__(self, embed_size, heads): super(EncoderBlockNew, self).__init__() self.heads = heads self.norm1 = nn.LayerNorm(embed_size) self.attention = MultiHeadAttention(embed_size * heads, head_num= heads, bias=True, activation=None) self.norm2 = nn.LayerNorm(embed_size) self.mlp = nn.Linear(embed_size, embed_size) def forward(self, input_0): primals_1 = self.norm1.weight primals_2 = self.norm1.bias primals_4 = self.attention.linear_q.weight primals_5 = self.attention.linear_q.bias primals_6 = self.attention.linear_k.weight primals_7 = self.attention.linear_k.bias primals_8 = self.attention.linear_v.weight primals_9 = self.attention.linear_v.bias primals_10 = self.attention.linear_o.weight primals_11 = self.attention.linear_o.bias primals_12 = self.norm2.weight primals_13 = self.norm2.bias primals_14 = self.mlp.weight primals_15 = self.mlp.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
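The pair of softmax kernels earlier in this listing split the operation into two passes: triton_poi_fused__softmax_4 writes exp((x - rowmax(x)) * 0.5) and triton_poi_fused__softmax_5 divides by the row sum. A minimal eager sketch of the same decomposition (not part of the generated output; the 0.5 is 1/sqrt(dk) with dk = 4 folded in from ScaledDotProductAttention):

import torch

x = torch.randn(16, 4, 4)
scale = 0.5  # 1 / sqrt(dk) with dk = 4, matching tmp15 in the kernel
# pass 1: numerically stable exponent, as in triton_poi_fused__softmax_4
numer = torch.exp((x - x.amax(dim=-1, keepdim=True)) * scale)
# pass 2: row-wise normalization, as in triton_poi_fused__softmax_5
out = numer / numer.sum(dim=-1, keepdim=True)
assert torch.allclose(out, torch.softmax(x * scale, dim=-1), atol=1e-6)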
dukeNashor/CaptainStony
EncoderBlock
false
1885
[ "MIT" ]
0
6320a27420e686666a4d7172437cf55fe42de2b6
https://github.com/dukeNashor/CaptainStony/tree/6320a27420e686666a4d7172437cf55fe42de2b6
import math import torch import torch.nn as nn import torch.nn.functional as F class ScaledDotProductAttention(nn.Module): def forward(self, query, key, value, mask=None): dk = query.size()[-1] scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk) if mask is not None: scores = scores.masked_fill(mask == 0, -1000000000.0) attention = F.softmax(scores, dim=-1) return attention.matmul(value) class MultiHeadAttention(nn.Module): def __init__(self, in_features, head_num, bias=True, activation=None): super().__init__() if in_features % head_num != 0: raise ValueError( '`in_features`({}) should be divisible by `head_num`({})'. format(in_features, head_num)) self.in_features = in_features self.head_num = head_num self.activation = activation self.bias = bias self.linear_q = nn.Linear(in_features, in_features, bias) self.linear_k = nn.Linear(in_features, in_features, bias) self.linear_v = nn.Linear(in_features, in_features, bias) self.linear_o = nn.Linear(in_features, in_features // self.head_num, bias) def forward(self, q, k, v, mask=None): q, k, v = self.linear_q(q), self.linear_k(k), self.linear_v(v) if self.activation is not None: q = self.activation(q) k = self.activation(k) v = self.activation(v) q = self._reshape_to_batches(q) k = self._reshape_to_batches(k) v = self._reshape_to_batches(v) if mask is not None: mask = mask.repeat(self.head_num, 1, 1) y = ScaledDotProductAttention()(q, k, v, mask) y = self._reshape_from_batches(y) y = self.linear_o(y) if self.activation is not None: y = self.activation(y) return y @staticmethod def gen_history_mask(x): """Generate the mask that only uses history data. :param x: Input tensor. :return: The mask. """ batch_size, seq_len, _ = x.size() return torch.tril(torch.ones(seq_len, seq_len)).view(1, seq_len, seq_len).repeat(batch_size, 1, 1) def _reshape_to_batches(self, x): batch_size, seq_len, in_feature = x.size() sub_dim = in_feature // self.head_num return x.reshape(batch_size, seq_len, self.head_num, sub_dim).permute( 0, 2, 1, 3).reshape(batch_size * self.head_num, seq_len, sub_dim) def _reshape_from_batches(self, x): batch_size, seq_len, in_feature = x.size() batch_size //= self.head_num out_dim = in_feature * self.head_num return x.reshape(batch_size, self.head_num, seq_len, in_feature ).permute(0, 2, 1, 3).reshape(batch_size, seq_len, out_dim) def extra_repr(self): return 'in_features={}, head_num={}, bias={}, activation={}'.format( self.in_features, self.head_num, self.bias, self.activation) class Model(nn.Module): def __init__(self, embed_size, heads): super().__init__() self.heads = heads self.norm1 = nn.LayerNorm(embed_size) self.attention = MultiHeadAttention(embed_size * heads, head_num= heads, bias=True, activation=None) self.norm2 = nn.LayerNorm(embed_size) self.mlp = nn.Linear(embed_size, embed_size) def forward(self, x): out = self.norm1(x) out = out.repeat(1, 1, self.heads) attention = self.attention(out, out, out, mask=None) attention = attention + x out2 = self.norm2(attention) out3 = self.mlp(out2) out3 = out3 + attention return out3 def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [4, 4]
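A hedged usage sketch for the reference Model above, using the shapes from get_inputs()/get_init_inputs(); because both residual adds preserve the input shape, the output matches the input:

import torch

model = Model(4, 4)  # embed_size=4, heads=4, as in get_init_inputs()
x = torch.rand(4, 4, 4)  # (batch, seq_len, embed_size), as in get_inputs()
y = model(x)
assert y.shape == x.shape  # residual connections preserve the input shape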
RobertaClassificationHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_3 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: 
[x_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return (reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.utils.data import torch.nn class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super(RobertaClassificationHead, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob= 0.5, num_labels=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4 class RobertaClassificationHeadNew(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super(RobertaClassificationHeadNew, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
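triton_poi_fused_clone_0 above materializes the features[:, 0, :] slice: for a contiguous (4, 4, 4, 4) input with strides (64, 16, 4, 1), the offset x0 + 64 * x1 with x0 < 16 visits exactly those elements. A small illustrative index check (not part of the generated output):

import torch

features = torch.arange(256, dtype=torch.float32).view(4, 4, 4, 4)
flat = features.reshape(-1)
idx = torch.arange(64)
# same index arithmetic as the kernel: x0 = idx % 16, x1 = idx // 16
gathered = flat[idx % 16 + 64 * (idx // 16)].view(4, 4, 4)
assert torch.equal(gathered, features[:, 0])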
HeartForNlp/VL-BERT
RobertaClassificationHead
false
1886
[ "MIT" ]
0
c1a590e2597b592629329db126cf8eae74b49cc0
https://github.com/HeartForNlp/VL-BERT/tree/c1a590e2597b592629329db126cf8eae74b49cc0
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.utils.data import torch.nn class Model(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob= 0.5, num_labels=4)}]
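A minimal eager sketch of the head above; types.SimpleNamespace stands in here for the _paritybench _mock_config helper, and eval() disables the two dropout layers so repeated calls agree:

import types
import torch

config = types.SimpleNamespace(hidden_size=4, hidden_dropout_prob=0.5, num_labels=4)
head = Model(config)
head.eval()  # dropout off, so the forward pass is deterministic
logits = head(torch.rand(4, 4, 4, 4))
assert logits.shape == (4, 4, 4)  # features[:, 0, :] drops one dimension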
CrossEntropyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # cross_entropy => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + 
(x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/za/cza3krejiqg7tjfagjknvrkfb4hawyqjlraihwndrbdsaurpzx3y.py # Topologically Sorted Source Nodes: [cross_entropy, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.mean] # Source node to ATen node mapping: # cross_entropy => div, exp, log, mul, neg, sub_1, sum_1, sum_2 # mean => mean # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {}) triton_per_fused__log_softmax_div_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mean_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused__log_softmax_div_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tmp22 = 1.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [cross_entropy, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.mean] triton_per_fused__log_softmax_div_mean_mul_neg_sum_1.run(buf2, buf0, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.cpp_extension class CrossEntropyLoss(torch.nn.Module): def __init__(self): super(CrossEntropyLoss, self).__init__() self.ce_loss = torch.nn.CrossEntropyLoss() def forward(self, cls_output, label, **_): return self.ce_loss(cls_output, label).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.cpp_extension assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tmp22 = 1.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_mean_mul_neg_sum_1[grid(1)](buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf2, class CrossEntropyLossNew(torch.nn.Module): def __init__(self): super(CrossEntropyLossNew, self).__init__() self.ce_loss = torch.nn.CrossEntropyLoss() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
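The fused reduction above ends with a multiply by 0.015625, i.e. 1/64: the mean over the 4 * 4 * 4 = 64 positions that share the class dimension (dim=1). A sketch of the same computation in eager PyTorch (soft probability targets require torch >= 1.10):

import torch
import torch.nn.functional as F

logits = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)  # soft targets, as in get_inputs()
# what the two kernels compute: -(target * log_softmax(logits)).sum() / 64
fused = -(F.log_softmax(logits, dim=1) * target).sum() / 64
eager = torch.nn.CrossEntropyLoss()(logits, target)
assert torch.allclose(fused, eager)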
ParthaEth/PyTorch-StudioGAN
CrossEntropyLoss
false
1887
[ "MIT" ]
0
16dd84415e4b7f4667cb1b1e0ef3fc04edf6b5a9
https://github.com/ParthaEth/PyTorch-StudioGAN/tree/16dd84415e4b7f4667cb1b1e0ef3fc04edf6b5a9
import torch import torch.utils.cpp_extension class Model(torch.nn.Module): def __init__(self): super().__init__() self.ce_loss = torch.nn.CrossEntropyLoss() def forward(self, cls_output, label, **_): return self.ce_loss(cls_output, label).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
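One detail mirrored in the kernel above: CrossEntropyLoss(reduction='mean') already returns a 0-dim tensor, so the trailing .mean() in forward() is a no-op; the kernel likewise divides the final scalar by tmp22 = 1.0. A one-line illustration:

import torch

loss = torch.nn.CrossEntropyLoss()(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
assert loss.dim() == 0 and torch.equal(loss, loss.mean())  # scalar mean of a scalar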
BertSelfOutput
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ai/cai32p2ssjvpyulvuzcicdszqe3thbavgxn4jeed6uatjnl7yq2s.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 
tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/dt/cdtfyiepqseugx5m3udiopa26uo6fdp2fyvmfcoipxuqyqqnb2l6.py # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # hidden_states_2 => add_1, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/mn/cmntyljhuirhsdjg2yosgzllpkpxqedxgoyk6gunquq2rf3kl7u5.py # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # hidden_states_2 => add_1, 
add_2, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {}) triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) 
assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf1, primals_3, primals_4, 256, grid=grid(256), stream=stream0) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_5, primals_6, buf4, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del primals_6 return (buf4, primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.utils.checkpoint class BertSelfOutput(nn.Module): def __init__(self, config, twin=False, merge=False): super().__init__() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) if twin: self.dense0 = nn.Linear(config.hidden_size, config.hidden_size) self.dense1 = nn.Linear(config.hidden_size, config.hidden_size) else: self.dense = nn.Linear(config.hidden_size, config.hidden_size) if merge: self.act = ACT2FN[config.hidden_act] self.merge_layer = nn.Linear(config.hidden_size * 2, config. hidden_size) self.merge = True else: self.merge = False def forward(self, hidden_states, input_tensor): if type(hidden_states) == list: hidden_states0 = self.dense0(hidden_states[0]) hidden_states1 = self.dense1(hidden_states[1]) if self.merge: hidden_states = self.merge_layer(torch.cat([hidden_states0, hidden_states1], dim=-1)) else: hidden_states = (hidden_states0 + hidden_states1) / 2 else: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.checkpoint assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1.0 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_3, primals_4, 256, 
XBLOCK=256, num_warps=4, num_stages=1) del primals_3 del primals_4 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_6 return buf4, primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), buf1 class BertSelfOutputNew(nn.Module): def __init__(self, config, twin=False, merge=False): super().__init__() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) if twin: self.dense0 = nn.Linear(config.hidden_size, config.hidden_size) self.dense1 = nn.Linear(config.hidden_size, config.hidden_size) else: self.dense = nn.Linear(config.hidden_size, config.hidden_size) if merge: self.act = ACT2FN[config.hidden_act] self.merge_layer = nn.Linear(config.hidden_size * 2, config. hidden_size) self.merge = True else: self.merge = False def forward(self, input_0, input_1): primals_3 = self.LayerNorm.weight primals_5 = self.LayerNorm.bias primals_2 = self.dense.weight primals_6 = self.dense.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
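Note the unusual epsilon in triton_poi_fused_native_layer_norm_1 above: tmp21 = 1.0 comes from layer_norm_eps=1 in the mock config rather than the usual 1e-5. A sketch of the fused normalization (weight and bias omitted for brevity):

import torch
import torch.nn.functional as F

h = torch.rand(4, 4, 4, 4)
mean = h.mean(dim=-1, keepdim=True)
var = h.var(dim=-1, unbiased=False, keepdim=True)
manual = (h - mean) * torch.rsqrt(var + 1.0)  # eps = 1.0, as in the kernel
assert torch.allclose(manual, F.layer_norm(h, (4,), eps=1.0), atol=1e-6)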
MikeWangWZHL/BLIP
BertSelfOutput
false
1888
[ "BSD-3-Clause" ]
0
b82134f1892a54c8f63b0f4b51bdcb8684e1dc6d
https://github.com/MikeWangWZHL/BLIP/tree/b82134f1892a54c8f63b0f4b51bdcb8684e1dc6d
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.utils.checkpoint class Model(nn.Module): def __init__(self, config, twin=False, merge=False): super().__init__() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) if twin: self.dense0 = nn.Linear(config.hidden_size, config.hidden_size) self.dense1 = nn.Linear(config.hidden_size, config.hidden_size) else: self.dense = nn.Linear(config.hidden_size, config.hidden_size) if merge: self.act = ACT2FN[config.hidden_act] self.merge_layer = nn.Linear(config.hidden_size * 2, config. hidden_size) self.merge = True else: self.merge = False def forward(self, hidden_states, input_tensor): if type(hidden_states) == list: hidden_states0 = self.dense0(hidden_states[0]) hidden_states1 = self.dense1(hidden_states[1]) if self.merge: hidden_states = self.merge_layer(torch.cat([hidden_states0, hidden_states1], dim=-1)) else: hidden_states = (hidden_states0 + hidden_states1) / 2 else: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
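A hedged usage sketch for the reference Model above with the default twin=False and merge=False, so only the single dense path runs; types.SimpleNamespace again stands in for _mock_config:

import types
import torch

config = types.SimpleNamespace(hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)
layer = Model(config)
layer.eval()  # disable dropout for a deterministic check
out = layer(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 4)  # dense + residual + LayerNorm keep the shape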
TransformerDecoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/6s/c6sstbvcita246hkfqwdeatnmsh3e6vlcncrzcwlsoqg7dmxvabp.py # Topologically Sorted Source Nodes: [tgt2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # tgt2 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/zv/czv3tzezwxkylzsgkrivaldxprnr7tvjr5iihe4mbc7bzdev5lsj.py # Topologically Sorted Source Nodes: [tgt2], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # tgt2 => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [1]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/nd/cnduix6bb25yait76qubu4kscpuuqgdjho4akakbftxdxfjk22sq.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] # Source node to ATen node mapping: # multi_head_attention_forward => mul_2 # Graph fragment: # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => amax, exp, sub_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), 
kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/dw/cdwqsjnh2osfmjr2utzzaqdg2vrfivzkuhareq3urgidllj2bsvr.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] # Source node to ATen node mapping: # multi_head_attention_forward => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, 
cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/y5/cy5gjrtl7netbzcjhig66pdorub2vbq2qvwmv3tamld2ehimmlz7.py # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] # Source node to ATen node mapping: # multi_head_attention_forward => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, 
ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask) tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ji/cjikooh3unjvssdwbmc5bbgrf7argvwkpdjikzfpajfrzpotlkhf.py # Topologically Sorted Source Nodes: [tgt, tgt2_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # tgt => add_2 # tgt2_1 => var_mean_1 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %squeeze), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [1]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 
tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/j4/cj4vucbv6vxdldbfg73k3ixw2brnd6f754oxugjq3s7syrcrb4qe.py # Topologically Sorted Source Nodes: [tgt, tgt2_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # tgt => add_2 # tgt2_1 => add_3, add_4, mul_3, mul_4, rsqrt_1, sub_2 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %squeeze), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_5, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_6), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_6), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_7), kwargs = {}) triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4r/c4rhihz6pmsoyuxbrnbsbo7jlrm4qd4fkzjvko5iqmouqegzifpb.py # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add] # Source node to ATen node mapping: # tgt => add_2 # tgt_1 => add_5 # Graph fragment: # %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %squeeze), kwargs = {}) # %add_5 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %squeeze_2), kwargs = {}) triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp3 = tl.load(in_out_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/72/c72dbqgy5xkloy2rcp3rynir3hrgprnoecgmcynn3qr6522vq72l.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_14), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_9 = async_compile.triton('triton_poi_fused_relu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4e/c4escp7v3hikmzkxuyjmzdj3juzy4ty3yheqrkztwu2gvsteirte.py # Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add] # Source node to ATen node mapping: # tgt_2 => add_8 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_16), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %add_tensor), kwargs = {}) triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (12, 4), (4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (2048, 4), (4, 1)) assert_size_stride(primals_14, (2048, ), (1, )) assert_size_stride(primals_15, (4, 2048), (2048, 1)) assert_size_stride(primals_16, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [tgt2], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 4, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 16, grid=grid(16), stream=stream0) del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), out=buf5) buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf6, 16, grid=grid(16), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: 
[aten.mul, aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf8, buf9, 64, grid=grid(64), stream=stream0) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm] extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf10, buf11, 4, 4, grid=grid(4, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf11, (4, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1; del buf1 # reuse buf14 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [tgt, tgt2_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(primals_3, buf12, buf13, buf14, 4, grid=grid(4), stream=stream0) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt, tgt2_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_7.run(primals_3, buf12, buf13, buf14, primals_6, primals_7, buf15, 16, grid=grid(16), stream=stream0) del primals_7 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mm] extern_kernels.mm(buf15, reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf16) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mm] extern_kernels.mm(primals_8, reinterpret_tensor(primals_9, (4, 4), (1, 4), 16), out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mm] extern_kernels.mm(primals_8, reinterpret_tensor(primals_9, (4, 4), (1, 4), 32), out=buf18) buf19 = reinterpret_tensor(buf16, (4, 4, 1), (1, 4, 16), 0); del buf16 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf19, 16, grid=grid(16), stream=stream0) buf20 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mul, aten.bmm] extern_kernels.bmm(buf19, reinterpret_tensor(buf17, (4, 1, 4), (1, 1, 4), 0), out=buf20) buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf20, buf21, 64, grid=grid(64), stream=stream0) buf22 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf21, 
buf22, 64, grid=grid(64), stream=stream0) del buf21 buf23 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf22, reinterpret_tensor(buf18, (4, 4, 1), (1, 4, 1), 0), out=buf23) buf24 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf23, buf24, 4, 4, grid=grid(4, 4), stream=stream0) buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0); del buf23 # reuse # Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf24, (4, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf25) buf26 = buf25; del buf25 # reuse # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add] triton_poi_fused_add_8.run(buf26, primals_3, buf12, 16, grid=grid(16), stream=stream0) buf27 = buf14; del buf14 # reuse buf28 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: [tgt2_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_0.run(buf26, buf27, buf28, 4, grid=grid(4), stream=stream0) buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt2_2], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf26, buf27, buf28, primals_11, primals_12, buf29, 16, grid=grid(16), stream=stream0) del buf27 del buf28 del primals_12 buf30 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf29, reinterpret_tensor(primals_13, (4, 2048), (1, 4), 0), out=buf30) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] triton_poi_fused_relu_9.run(buf31, primals_14, 8192, grid=grid(8192), stream=stream0) del primals_14 buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf31, reinterpret_tensor(primals_15, (2048, 4), (1, 2048), 0), out=buf32) buf33 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add] triton_poi_fused_add_10.run(buf33, buf26, primals_16, 16, grid=grid(16), stream=stream0) del primals_16 return (buf33, primals_3, primals_6, primals_11, buf2, buf9, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), buf12, buf15, primals_8, buf22, reinterpret_tensor(buf24, (4, 4), (4, 1), 0), buf26, buf29, buf31, primals_15, primals_13, primals_10, reinterpret_tensor(buf18, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf19, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf17, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (4, 1), 0), primals_5, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), 
device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
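The pair of _softmax kernels above is the standard numerically stable two-pass decomposition: triton_poi_fused__softmax_3 shifts each row by its maximum and exponentiates, and triton_poi_fused__softmax_4 normalizes by the row sum. A plain-PyTorch sketch of the same split:

import torch

def softmax_two_pass(scores):
    # pass 1: subtract the row max so exp() cannot overflow (kernel 3)
    numer = (scores - scores.amax(dim=-1, keepdim=True)).exp()
    # pass 2: divide by the row sum (kernel 4)
    return numer / numer.sum(dim=-1, keepdim=True)

scores = torch.randn(4, 4, 4)
assert torch.allclose(softmax_two_pass(scores), torch.softmax(scores, dim=-1))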
import torch from torch import nn import torch.nn.functional as F import torch.utils.model_zoo def _get_activation_fn(activation): """Return an activation function given a string""" if activation == 'relu': return F.relu if activation == 'gelu': return F.gelu if activation == 'glu': return F.glu raise RuntimeError(f'activation should be relu/gelu, not {activation}.') class TransformerDecoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, no_norm=False, activation='relu'): super().__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout= dropout, bias=False) self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout =dropout, bias=False) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) if not no_norm else nn.Identity() self.norm2 = nn.LayerNorm(d_model) if not no_norm else nn.Identity() self.norm3 = nn.LayerNorm(d_model) if not no_norm else nn.Identity() self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) def with_pos_embed(self, tensor, pos): return tensor if pos is None else tensor + pos def forward(self, tgt, memory, pos=None, query_pos=None): tgt2 = self.norm1(tgt) q = k = self.with_pos_embed(tgt2, query_pos) tgt2 = self.self_attn(q, k, value=tgt2)[0] tgt = tgt + self.dropout1(tgt2) tgt2 = self.norm2(tgt) tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), key=self.with_pos_embed(memory, pos), value=memory)[0] tgt = tgt + self.dropout2(tgt2) tgt2 = self.norm3(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) tgt = tgt + self.dropout3(tgt2) return tgt def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
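A short usage sketch for the layer above, matching get_inputs()/get_init_inputs(); on recent PyTorch versions nn.MultiheadAttention accepts an unbatched (seq_len, d_model) input, which is what the 4x4 tensors here are:

import torch

layer = TransformerDecoderLayer(d_model=4, nhead=4).eval()  # eval() silences the dropouts
tgt = torch.rand(4, 4)     # (seq_len, d_model)
memory = torch.rand(4, 4)  # (seq_len, d_model)
out = layer(tgt, memory)   # pos/query_pos default to None: no positional embeddings added
print(out.shape)           # torch.Size([4, 4])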
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn import torch.nn.functional as F import torch.utils.model_zoo assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: 
tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask) tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 
tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.load(in_out_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (12, 4), (4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (2048, 4), (4, 1)) assert_size_stride(primals_14, (2048,), (1,)) assert_size_stride(primals_15, (4, 2048), (2048, 1)) assert_size_stride(primals_16, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(4)](primals_3, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](primals_3, buf0, buf1, primals_1, primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 16), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4 ), 32), out=buf5) buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0) del buf3 triton_poi_fused_mul_2[grid(16)](buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = buf7 del buf7 triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, 
XBLOCK=64, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0) del buf10 extern_kernels.mm(reinterpret_tensor(buf11, (4, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf12) buf13 = buf1 del buf1 buf14 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_3, buf12, buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_3, buf12, buf13, buf14, primals_6, primals_7, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf15, reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf16) buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_8, reinterpret_tensor(primals_9, (4, 4), (1, 4), 16), out=buf17) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_8, reinterpret_tensor(primals_9, (4, 4), (1, 4), 32), out=buf18) buf19 = reinterpret_tensor(buf16, (4, 4, 1), (1, 4, 16), 0) del buf16 triton_poi_fused_mul_2[grid(16)](buf19, 16, XBLOCK=16, num_warps=1, num_stages=1) buf20 = buf8 del buf8 extern_kernels.bmm(buf19, reinterpret_tensor(buf17, (4, 1, 4), (1, 1, 4), 0), out=buf20) buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(64)](buf20, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1) buf22 = buf20 del buf20 triton_poi_fused__softmax_4[grid(64)](buf21, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf21 buf23 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf22, reinterpret_tensor(buf18, (4, 4, 1), (1, 4, 1), 0), out=buf23) buf24 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 4)](buf23, buf24, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0) del buf23 extern_kernels.mm(reinterpret_tensor(buf24, (4, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf25) buf26 = buf25 del buf25 triton_poi_fused_add_8[grid(16)](buf26, primals_3, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) buf27 = buf14 del buf14 buf28 = buf13 del buf13 triton_poi_fused_native_layer_norm_0[grid(4)](buf26, buf27, buf28, 4, XBLOCK=4, num_warps=1, num_stages=1) buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](buf26, buf27, buf28, primals_11, primals_12, buf29, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf27 del buf28 del primals_12 buf30 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32) extern_kernels.mm(buf29, reinterpret_tensor(primals_13, (4, 2048), (1, 4), 0), out=buf30) buf31 = buf30 del buf30 triton_poi_fused_relu_9[grid(8192)](buf31, primals_14, 8192, XBLOCK =256, num_warps=4, num_stages=1) del primals_14 buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf31, reinterpret_tensor(primals_15, (2048, 4), (1, 2048), 0), out=buf32) buf33 = buf32 del buf32 triton_poi_fused_add_10[grid(16)](buf33, buf26, primals_16, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_16 return (buf33, 
primals_3, primals_6, primals_11, buf2, buf9, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), buf12, buf15, primals_8, buf22, reinterpret_tensor(buf24, (4, 4), (4, 1), 0), buf26, buf29, buf31, primals_15, primals_13, primals_10, reinterpret_tensor(buf18, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf19, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf17, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (4, 1), 0), primals_5, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0)) def _get_activation_fn(activation): """Return an activation function given a string""" if activation == 'relu': return F.relu if activation == 'gelu': return F.gelu if activation == 'glu': return F.glu raise RuntimeError(f'activation should be relu/gelu, not {activation}.') class TransformerDecoderLayerNew(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, no_norm=False, activation='relu'): super().__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout= dropout, bias=False) self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout =dropout, bias=False) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) if not no_norm else nn.Identity() self.norm2 = nn.LayerNorm(d_model) if not no_norm else nn.Identity() self.norm3 = nn.LayerNorm(d_model) if not no_norm else nn.Identity() self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) def with_pos_embed(self, tensor, pos): return tensor if pos is None else tensor + pos def forward(self, input_0, input_1): primals_4 = self.self_attn.in_proj_weight primals_3 = self.self_attn.out_proj.weight primals_9 = self.multihead_attn.in_proj_weight primals_5 = self.multihead_attn.out_proj.weight primals_13 = self.linear1.weight primals_14 = self.linear1.bias primals_15 = self.linear2.weight primals_1 = self.linear2.bias primals_2 = self.norm1.weight primals_6 = self.norm1.bias primals_7 = self.norm2.weight primals_11 = self.norm2.bias primals_12 = self.norm3.weight primals_16 = self.norm3.bias primals_8 = input_0 primals_10 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return output[0]
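A note on the three mm calls against primals_4 at storage offsets 0, 16 and 32 in the call() above: they are the q/k/v split of nn.MultiheadAttention's packed (3*d_model, d_model) in_proj_weight; with d_model=4 each chunk holds 16 floats. A minimal sketch of the same split in plain PyTorch (illustrative only, not part of the record):

import torch
import torch.nn as nn

mha = nn.MultiheadAttention(embed_dim=4, num_heads=4, bias=False)
w_q, w_k, w_v = mha.in_proj_weight.chunk(3, dim=0)  # each (4, 4); offsets 0, 16, 32 in storage
print(mha.in_proj_weight.shape, w_q.shape)          # torch.Size([12, 4]) torch.Size([4, 4])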
dongyan007/Pretrained-IPT-main-master
TransformerDecoderLayer
false
1,889
[ "Apache-2.0" ]
0
7ed47002373e11bd57b7904f6935acdfba1e44ff
https://github.com/dongyan007/Pretrained-IPT-main-master/tree/7ed47002373e11bd57b7904f6935acdfba1e44ff
import torch from torch import nn import torch.nn.functional as F import torch.utils.model_zoo def _get_activation_fn(activation): """Return an activation function given a string""" if activation == 'relu': return F.relu if activation == 'gelu': return F.gelu if activation == 'glu': return F.glu raise RuntimeError(f'activation should be relu/gelu, not {activation}.') class Model(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, no_norm=False, activation='relu'): super().__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout= dropout, bias=False) self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout =dropout, bias=False) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) if not no_norm else nn.Identity() self.norm2 = nn.LayerNorm(d_model) if not no_norm else nn.Identity() self.norm3 = nn.LayerNorm(d_model) if not no_norm else nn.Identity() self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) def with_pos_embed(self, tensor, pos): return tensor if pos is None else tensor + pos def forward(self, tgt, memory, pos=None, query_pos=None): tgt2 = self.norm1(tgt) q = k = self.with_pos_embed(tgt2, query_pos) tgt2 = self.self_attn(q, k, value=tgt2)[0] tgt = tgt + self.dropout1(tgt2) tgt2 = self.norm2(tgt) tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), key=self.with_pos_embed(memory, pos), value=memory)[0] tgt = tgt + self.dropout2(tgt2) tgt2 = self.norm3(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) tgt = tgt + self.dropout3(tgt2) return tgt def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [4, 4]
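A minimal sanity check for this record, assuming the eager Model above and the compiled TransformerDecoderLayerNew wrapper are importable from one module and that a CUDA device is available (the generated call() pins cuda:0). Both modules go into eval mode so dropout is inert, since the compiled path omits dropout entirely; the attribute names line up one-to-one, so a plain state_dict copy suffices. This is a sketch, not part of the dataset record:

import torch

eager = Model(4, 4).cuda().eval()          # d_model=4, nhead=4, per get_init_inputs()
compiled = TransformerDecoderLayerNew(4, 4).cuda().eval()
compiled.load_state_dict(eager.state_dict())

tgt, memory = (t.cuda() for t in (torch.rand(4, 4), torch.rand(4, 4)))
with torch.no_grad():
    ref = eager(tgt, memory)
    out = compiled(tgt, memory)
print(torch.allclose(ref, out, atol=1e-5))  # expected True if the lowering is faithful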
Sampling
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/wm/cwmqlbvbics7z7kr4p5f3ne6rdnkbsrcrcxxrr56pqdyjkrxldj7.py # Topologically Sorted Source Nodes: [conv_out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv_out => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv_out], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv_out], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 return (reinterpret_tensor(buf1, (4, ), (4, ), 3), primals_2, reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch from torch import nn class Sampling(nn.Module): def __init__(self, args, seq_len): super(Sampling, self).__init__() self.conv = nn.Conv1d(seq_len, args.att_out_channel, kernel_size=1) def forward(self, x): """ :param x: (batch, N=1, channel, wavelet_seq) :return: (batch, N=1, att_out_channel, wavelet_seq[-1]) """ x = x.squeeze() conv_out = self.conv(x) return conv_out[..., -1] def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'args': _mock_config(att_out_channel=4), 'seq_len': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return reinterpret_tensor(buf1, (4,), (4,), 3), primals_2, reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0)


class SamplingNew(nn.Module):

    def __init__(self, args, seq_len):
        super(SamplingNew, self).__init__()
        self.conv = nn.Conv1d(seq_len, args.att_out_channel, kernel_size=1)

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
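Note how the slice conv_out[..., -1] survives lowering: the wrapper returns reinterpret_tensor(buf1, (4,), (4,), 3), a stride-4 view of the (4, 4) conv output starting at storage offset 3, which is exactly the last column. A tiny CPU sketch of the same view arithmetic (as_strided used as an illustrative stand-in for reinterpret_tensor):

import torch

buf1 = torch.arange(16.0).reshape(4, 4)          # contiguous (4, 4), strides (4, 1)
view = buf1.flatten().as_strided((4,), (4,), 3)  # size (4,), stride (4,), offset 3
print(torch.equal(view, buf1[..., -1]))          # True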
ICLab4DL/AWN
Sampling
false
1,890
[ "MIT" ]
0
48d6edd85eabd77e9bb410dc5f31f8f937c9a857
https://github.com/ICLab4DL/AWN/tree/48d6edd85eabd77e9bb410dc5f31f8f937c9a857
from _paritybench_helpers import _mock_config import torch from torch import nn class Model(nn.Module): def __init__(self, args, seq_len): super().__init__() self.conv = nn.Conv1d(seq_len, args.att_out_channel, kernel_size=1) def forward(self, x): """ :param x: (batch, N=1, channel, wavelet_seq) :return: (batch, N=1, att_out_channel, wavelet_seq[-1]) """ x = x.squeeze() conv_out = self.conv(x) return conv_out[..., -1] def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [4]
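For quick orientation, a sketch of driving the eager Sampling module above without the _paritybench_helpers dependency: types.SimpleNamespace stands in for _mock_config (an assumption, not part of the record), and the unbatched (C_in, L) Conv1d input relies on PyTorch 1.12 or newer.

import torch
from types import SimpleNamespace

args = SimpleNamespace(att_out_channel=4)  # stand-in for _mock_config(att_out_channel=4)
m = Sampling(args, seq_len=4)

x = torch.rand(4, 4)  # matches get_inputs(); squeeze() is a no-op on this shape
out = m(x)            # Conv1d over (C_in=4, L=4), then take the last position
print(out.shape)      # torch.Size([4]), one value per output channel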
PoseNetFeat
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/gj/cgjvtebls4tlyx7vrq7femvjnasdisgzq2dhtos5qfetig5jihmc.py # Topologically Sorted Source Nodes: [conv1d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv1d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [0], [1], False, [0], 1), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/g2/cg2nw6ecspq7a3naj7mf2x6difdt6rg4piybxw2z4ajnllncw56b.py # Topologically Sorted Source Nodes: [pointfeat_2], Original ATen: [aten.cat] # Source node to ATen node mapping: # pointfeat_2 => cat_1 # Graph fragment: # %cat_1 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_2, %relu_3], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 64) % 256 x0 = xindex % 64 x2 = (xindex // 16384) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (8192*x2)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 256, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (64*((-128) + x1)) + (8192*x2)), tmp12, other=0.0) tmp16 = tl.load(in_ptr3 + ((-128) + x1), tmp12, eviction_policy='evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + (x3), tmp21, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/an/canhpnb43drx25h7zse5diln5yhkabraygooa4ndxm44eud6u5dl.py # Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat_2 => cat_2 # Graph fragment: # %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %cat_1, %relu_4], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 163840 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 64) % 640 x0 = xindex % 64 x2 = (xindex // 40960) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 64, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.load(in_ptr0 + (x0 + (64*x1) + (4096*x2)), tmp7, other=0.0) tmp9 = tmp0 >= tmp5 tmp10 = tmp9 & tmp4 tmp11 = tl.load(in_ptr1 + (x0 + (64*((-64) + x1)) + (4096*x2)), tmp10, other=0.0) tmp12 = tl.where(tmp6, tmp8, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 384, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr2 + (x0 + (64*((-128) + x1)) + (16384*x2)), tmp18, other=0.0) tmp20 = tmp0 >= tmp16 tmp21 = tl.full([1], 640, tl.int64) tmp22 = tmp0 < tmp21 tmp23 = tl.load(in_ptr3 + (x0 + (64*((-384) + x1)) + (16384*x2)), tmp20, other=0.0) tmp24 = tl.load(in_ptr4 + ((-384) + x1), tmp20, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tl.full([1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp20, tmp27, tmp28) tmp30 = tl.where(tmp18, tmp19, tmp29) tmp31 = tl.where(tmp4, tmp14, tmp30) tl.store(out_ptr0 + (x3), tmp31, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/vo/cvohlke4xnfgzejz5kvozs5rcwgr4vxsqfc4mjjsdfo43lpeufna.py # Topologically Sorted Source Nodes: [conv1d_5, x_4], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # conv1d_5 => convolution_5 # x_4 => gt, mul, where # Graph fragment: # %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_13, %primals_14, [1], [0], [1], False, [0], 1), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_5, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_5, 0.01), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution_5, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 81920 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 320 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x3), tmp4, None) tl.store(out_ptr1 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/gf/cgfraiihnqesk5wbpwygpjlbm4yn3jflohmpeu3fitfqezchskef.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_5 => convolution_6 # Graph fragment: # %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_15, %primals_16, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 40960 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 160 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ij/cij356nxgzjxr37734cyp7zp3rxxxpzv3bzotmc5e5bqayalpyqb.py # Topologically Sorted Source Nodes: [conv1d_4, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv1d_4 => convolution_4 # x_2 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_11, %primals_12, [1], [0], [1], False, [0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_convolution_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 256 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ho/choewu5xusgcnc4x2qdqs3iu2nxek254eb3qotcljnxxh32ofi24.py # Topologically Sorted Source Nodes: [conv1d_3, emb_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv1d_3 => convolution_3 # emb_1 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_9, %primals_10, [1], [0], [1], False, [0], 1), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 128 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16 = args args.clear() assert_size_stride(primals_1, (64, 3, 1), (3, 1, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 64), (192, 64, 1)) assert_size_stride(primals_4, (64, 32, 1), (32, 1, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (4, 32, 64), (2048, 64, 1)) assert_size_stride(primals_7, (128, 64, 1), (64, 1, 1)) assert_size_stride(primals_8, (128, ), (1, )) assert_size_stride(primals_9, (128, 64, 1), (64, 1, 1)) assert_size_stride(primals_10, (128, ), (1, )) assert_size_stride(primals_11, (256, 256, 1), (256, 1, 1)) assert_size_stride(primals_12, (256, ), (1, )) assert_size_stride(primals_13, (320, 640, 1), (640, 1, 1)) assert_size_stride(primals_14, (320, ), (1, )) assert_size_stride(primals_15, (160, 320, 1), (320, 1, 1)) assert_size_stride(primals_16, (160, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64), (4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv1d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 16384, grid=grid(16384), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_6, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64), (4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv1d_1, emb], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 16384, grid=grid(16384), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf1, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf4, (4, 128, 64), (8192, 64, 1)) # Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf3, primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 64), (8192, 64, 1)) buf6 = empty_strided_cuda((4, 256, 64), (16384, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [pointfeat_2], Original ATen: 
[aten.cat] triton_poi_fused_cat_1.run(buf4, primals_8, buf5, primals_10, buf6, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv1d_4], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_11, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf7, (4, 256, 64), (16384, 64, 1)) buf8 = empty_strided_cuda((4, 640, 64), (40960, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf1, buf3, buf6, buf7, primals_12, buf8, 163840, grid=grid(163840), stream=stream0) # Topologically Sorted Source Nodes: [conv1d_5], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, primals_13, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf9, (4, 320, 64), (20480, 64, 1)) buf10 = empty_strided_cuda((4, 320, 64), (20480, 64, 1), torch.bool) buf11 = empty_strided_cuda((4, 320, 64), (20480, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [conv1d_5, x_4], Original ATen: [aten.convolution, aten.leaky_relu] triton_poi_fused_convolution_leaky_relu_3.run(buf9, primals_14, buf10, buf11, 81920, grid=grid(81920), stream=stream0) del buf9 del primals_14 # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf11, primals_15, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf12, (4, 160, 64), (10240, 64, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf13, primals_16, 40960, grid=grid(40960), stream=stream0) del primals_16 buf14 = empty_strided_cuda((4, 256, 64), (16384, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv1d_4, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_5.run(buf7, primals_12, buf14, 65536, grid=grid(65536), stream=stream0) del buf7 del primals_12 buf15 = empty_strided_cuda((4, 128, 64), (8192, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv1d_3, emb_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_6.run(buf5, primals_10, buf15, 32768, grid=grid(32768), stream=stream0) del buf5 del primals_10 buf16 = empty_strided_cuda((4, 128, 64), (8192, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [conv1d_2, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_6.run(buf4, primals_8, buf16, 32768, grid=grid(32768), stream=stream0) del buf4 del primals_8 return (buf13, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, primals_11, primals_13, primals_15, buf1, buf3, buf6, buf8, buf10, buf11, buf14, buf15, buf16, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 1), (3, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64), (192, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 32, 1), (32, 1, 1), device='cuda:0', dtype=torch.float32) 
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 32, 64), (2048, 64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, 64, 1), (64, 1, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, 64, 1), (64, 1, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, 256, 1), (256, 1, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((320, 640, 1), (640, 1, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((320, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((160, 320, 1), (320, 1, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((160, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel import torch.utils.data import torch.utils.cpp_extension class PoseNetFeat(nn.Module): def __init__(self, num_points): super(PoseNetFeat, self).__init__() self.conv1 = torch.nn.Conv1d(3, 64, 1) self.conv2 = torch.nn.Conv1d(64, 128, 1) self.e_conv1 = torch.nn.Conv1d(32, 64, 1) self.e_conv2 = torch.nn.Conv1d(64, 128, 1) self.conv5 = torch.nn.Conv1d(256, 256, 1) self.all_conv1 = torch.nn.Conv1d(640, 320, 1) self.all_conv2 = torch.nn.Conv1d(320, 160, 1) self.num_points = num_points def forward(self, x, emb): x = F.relu(self.conv1(x)) emb = F.relu(self.e_conv1(emb)) pointfeat_1 = torch.cat((x, emb), dim=1) x = F.relu(self.conv2(x)) emb = F.relu(self.e_conv2(emb)) pointfeat_2 = torch.cat((x, emb), dim=1) x = F.relu(self.conv5(pointfeat_2)) x = torch.cat([pointfeat_1, pointfeat_2, x], dim=1).contiguous() x = F.leaky_relu(self.all_conv1(x)) x = self.all_conv2(x) return x def get_inputs(): return [torch.rand([4, 3, 64]), torch.rand([4, 32, 64])] def get_init_inputs(): return [[], {'num_points': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.parallel import torch.utils.data import torch.utils.cpp_extension assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 256 x0 = xindex % 64 x2 = xindex // 16384 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 8192 * x2), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp15 = tl.load(in_ptr2 + (x0 + 64 * (-128 + x1) + 8192 * x2), tmp12, other=0.0) tmp16 = tl.load(in_ptr3 + (-128 + x1), tmp12, eviction_policy= 'evict_last', other=0.0) tmp17 = tmp15 + tmp16 tmp18 = triton_helpers.maximum(tmp8, tmp17) tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp12, tmp18, tmp19) tmp21 = tl.where(tmp4, tmp11, tmp20) tl.store(out_ptr0 + x3, tmp21, None) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 640 x0 = xindex % 64 x2 = xindex // 40960 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 64, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.load(in_ptr0 + (x0 + 64 * x1 + 4096 * x2), tmp7, other=0.0) tmp9 = tmp0 >= tmp5 tmp10 = tmp9 & tmp4 tmp11 = tl.load(in_ptr1 + (x0 + 64 * (-64 + x1) + 4096 * x2), tmp10, other=0.0) tmp12 = tl.where(tmp6, tmp8, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 384, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr2 + (x0 + 64 * (-128 + x1) + 16384 * x2), tmp18, other=0.0) tmp20 = tmp0 >= tmp16 tl.full([1], 640, tl.int64) tmp23 = tl.load(in_ptr3 + (x0 + 64 * (-384 + x1) + 16384 * x2), tmp20, other=0.0) tmp24 = tl.load(in_ptr4 + (-384 + x1), tmp20, eviction_policy= 'evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tl.full([1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp20, tmp27, tmp28) tmp30 = 
tl.where(tmp18, tmp19, tmp29) tmp31 = tl.where(tmp4, tmp14, tmp30) tl.store(out_ptr0 + x3, tmp31, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 320 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x3, tmp4, None) tl.store(out_ptr1 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 160 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16) = args args.clear() assert_size_stride(primals_1, (64, 3, 1), (3, 1, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64), (192, 64, 1)) assert_size_stride(primals_4, (64, 32, 1), (32, 1, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (4, 32, 64), (2048, 64, 1)) assert_size_stride(primals_7, (128, 64, 1), (64, 1, 1)) assert_size_stride(primals_8, (128,), (1,)) assert_size_stride(primals_9, (128, 64, 1), (64, 1, 1)) assert_size_stride(primals_10, (128,), (1,)) assert_size_stride(primals_11, (256, 256, 1), (256, 1, 1)) assert_size_stride(primals_12, (256,), (1,)) assert_size_stride(primals_13, (320, 640, 1), (640, 1, 1)) assert_size_stride(primals_14, (320,), (1,)) assert_size_stride(primals_15, (160, 320, 1), (320, 1, 1)) assert_size_stride(primals_16, (160,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 64), (4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) 
triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(primals_6, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64), (4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(16384)](buf3, primals_5, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf1, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf4, (4, 128, 64), (8192, 64, 1)) buf5 = extern_kernels.convolution(buf3, primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf5, (4, 128, 64), (8192, 64, 1)) buf6 = empty_strided_cuda((4, 256, 64), (16384, 64, 1), torch.float32) triton_poi_fused_cat_1[grid(65536)](buf4, primals_8, buf5, primals_10, buf6, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf6, primals_11, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf7, (4, 256, 64), (16384, 64, 1)) buf8 = empty_strided_cuda((4, 640, 64), (40960, 64, 1), torch.float32) triton_poi_fused_cat_2[grid(163840)](buf1, buf3, buf6, buf7, primals_12, buf8, 163840, XBLOCK=512, num_warps=8, num_stages=1) buf9 = extern_kernels.convolution(buf8, primals_13, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf9, (4, 320, 64), (20480, 64, 1)) buf10 = empty_strided_cuda((4, 320, 64), (20480, 64, 1), torch.bool) buf11 = empty_strided_cuda((4, 320, 64), (20480, 64, 1), torch.float32) triton_poi_fused_convolution_leaky_relu_3[grid(81920)](buf9, primals_14, buf10, buf11, 81920, XBLOCK=512, num_warps=8, num_stages=1) del buf9 del primals_14 buf12 = extern_kernels.convolution(buf11, primals_15, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf12, (4, 160, 64), (10240, 64, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_4[grid(40960)](buf13, primals_16, 40960, XBLOCK=512, num_warps=4, num_stages=1) del primals_16 buf14 = empty_strided_cuda((4, 256, 64), (16384, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_5[grid(65536)]( buf7, primals_12, buf14, 65536, XBLOCK=512, num_warps=4, num_stages=1) del buf7 del primals_12 buf15 = empty_strided_cuda((4, 128, 64), (8192, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_6[grid(32768)]( buf5, primals_10, buf15, 32768, XBLOCK=128, num_warps=4, num_stages=1) del buf5 del primals_10 buf16 = empty_strided_cuda((4, 128, 64), (8192, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_6[grid(32768)]( buf4, primals_8, buf16, 32768, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del primals_8 return (buf13, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, primals_11, primals_13, primals_15, buf1, buf3, buf6, buf8, buf10, buf11, buf14, buf15, buf16) class PoseNetFeatNew(nn.Module): def __init__(self, num_points): super(PoseNetFeatNew, self).__init__() self.conv1 = torch.nn.Conv1d(3, 64, 1) self.conv2 = torch.nn.Conv1d(64, 128, 1) self.e_conv1 = torch.nn.Conv1d(32, 64, 1) self.e_conv2 = torch.nn.Conv1d(64, 128, 1) self.conv5 = torch.nn.Conv1d(256, 256, 1) 
self.all_conv1 = torch.nn.Conv1d(640, 320, 1) self.all_conv2 = torch.nn.Conv1d(320, 160, 1) self.num_points = num_points def forward(self, input_0, input_1): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_7 = self.conv2.weight primals_8 = self.conv2.bias primals_4 = self.e_conv1.weight primals_5 = self.e_conv1.bias primals_9 = self.e_conv2.weight primals_10 = self.e_conv2.bias primals_11 = self.conv5.weight primals_12 = self.conv5.bias primals_13 = self.all_conv1.weight primals_14 = self.all_conv1.bias primals_15 = self.all_conv2.weight primals_16 = self.all_conv2.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16]) return output[0]
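As with the other records, the intended contract is that call() reproduces the eager forward. A sketch of checking that, assuming PoseNetFeat (eager) and PoseNetFeatNew are importable together and a CUDA device is available; there is no dropout here, so the comparison is deterministic:

import torch

eager = PoseNetFeat(num_points=4).cuda()
compiled = PoseNetFeatNew(num_points=4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical layer names on both sides

x = torch.rand(4, 3, 64, device='cuda')
emb = torch.rand(4, 32, 64, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x, emb), compiled(x, emb), atol=1e-5))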
LiCHOTHU/ocean-kp
PoseNetFeat
false
1,891
[ "MIT" ]
0
2102bda2e51233baad0da12a6b1f168a7882564b
https://github.com/LiCHOTHU/ocean-kp/tree/2102bda2e51233baad0da12a6b1f168a7882564b
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel import torch.utils.data import torch.utils.cpp_extension class Model(nn.Module): def __init__(self, num_points): super().__init__() self.conv1 = torch.nn.Conv1d(3, 64, 1) self.conv2 = torch.nn.Conv1d(64, 128, 1) self.e_conv1 = torch.nn.Conv1d(32, 64, 1) self.e_conv2 = torch.nn.Conv1d(64, 128, 1) self.conv5 = torch.nn.Conv1d(256, 256, 1) self.all_conv1 = torch.nn.Conv1d(640, 320, 1) self.all_conv2 = torch.nn.Conv1d(320, 160, 1) self.num_points = num_points def forward(self, x, emb): x = F.relu(self.conv1(x)) emb = F.relu(self.e_conv1(emb)) pointfeat_1 = torch.cat((x, emb), dim=1) x = F.relu(self.conv2(x)) emb = F.relu(self.e_conv2(emb)) pointfeat_2 = torch.cat((x, emb), dim=1) x = F.relu(self.conv5(pointfeat_2)) x = torch.cat([pointfeat_1, pointfeat_2, x], dim=1).contiguous() x = F.leaky_relu(self.all_conv1(x)) x = self.all_conv2(x) return x def get_inputs(): return [torch.rand([4, 3, 64]), torch.rand([4, 32, 64])] def get_init_inputs(): return [4]
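The channel bookkeeping behind the fused cat kernels earlier in this record is easiest to read off the eager module. A CPU-only sketch, with shapes taken from get_inputs():

import torch

m = PoseNetFeat(num_points=4)
x, emb = torch.rand(4, 3, 64), torch.rand(4, 32, 64)

# conv1/e_conv1: 3 -> 64 and 32 -> 64, so pointfeat_1 has 64 + 64 = 128 channels.
# conv2/e_conv2: 64 -> 128 each, so pointfeat_2 has 128 + 128 = 256 (triton_poi_fused_cat_1).
# Final cat: 128 + 256 + 256 = 640 channels (triton_poi_fused_cat_2), then 640 -> 320 -> 160.
print(m(x, emb).shape)  # torch.Size([4, 160, 64])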
BertIntermediate
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/6s/c6shmuvjmq6zc4ifvdsynorwri47ra63qxa7jg3e7p6lw6xlqj5q.py # Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] # Source node to ATen node mapping: # add => add # erf => erf # hidden_states_1 => mul_1 # mul => mul # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) triton_poi_fused_add_div_erf_mul_0 = async_compile.triton('triton_poi_fused_add_div_erf_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': 
False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_div_erf_mul_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class BertIntermediate(nn.Module): def __init__(self, config): super(BertIntermediate, self).__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) self.intermediate_act_fn = gelu def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, intermediate_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_erf_mul_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class BertIntermediateNew(nn.Module): def __init__(self, config): super(BertIntermediateNew, self).__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) self.intermediate_act_fn = gelu def forward(self, input_0): primals_1 = self.dense.weight primals_2 = self.dense.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
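A minimal sketch in plain PyTorch (an illustration, not part of the extracted repo) of what triton_poi_fused_add_div_erf_mul_0 computes: the constant 0.7071067811865475 is 1/sqrt(2) folded into the erf argument, so the kernel evaluates exact erf-based GELU, and reinterpret_tensor merely takes a zero-copy (64, 4) view of the (4, 4, 4, 4) input for the addmm.

import math
import torch
import torch.nn.functional as F

x = torch.randn(64, 4)
# The kernel's arithmetic: x * 0.5 * (1 + erf(x * (1/sqrt(2)))).
fused = x * 0.5 * (1.0 + torch.erf(x * 0.7071067811865475))
eager = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))  # the module's gelu()
assert torch.allclose(fused, eager)
assert torch.allclose(fused, F.gelu(x))  # F.gelu defaults to the exact erf form

Running this on CPU is enough to check the math; the CUDA path only changes where the arithmetic happens.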
RoshanTanisha/TVCaption
BertIntermediate
false
1,892
[ "MIT" ]
0
8b14a340134ec69ed87426ee1f0e93e53f6456e5
https://github.com/RoshanTanisha/TVCaption/tree/8b14a340134ec69ed87426ee1f0e93e53f6456e5
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class Model(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) self.intermediate_act_fn = gelu def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/ch/cchblwxibhbejgpezaajsszmynwayrtalecnd5ksg22infzh2v3o.py # Topologically Sorted Source Nodes: [dot, dot_1], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # dot => add # dot_1 => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, %expand), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 64) tmp0 = 
tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(out_ptr0 + (x3), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/pw/cpw7p22sygsv72j67klg2k3xeokdqrvalgmkjpkzpqpj5pv4xmkc.py # Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax] # Source node to ATen node mapping: # weight => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) 
tl.store(out_ptr1 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_5, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot, dot_1], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(primals_2, buf0, primals_4, buf1, 256, grid=grid(256), stream=stream0) del primals_2 del primals_4 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf3, buf4, buf5, buf6, 4, 16, grid=grid(4), stream=stream0) buf7 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 16), (16, 0, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 4, 1), 0), out=buf7) del buf6 return (reinterpret_tensor(buf7, (4, 4), (4, 1), 0), primals_5, buf1, buf3, buf4, buf5, reinterpret_tensor(primals_1, (4, 4, 16), (64, 1, 4), 0), primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
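A short eager sketch (shapes taken from get_inputs below; plain PyTorch, not from the repo) of the broadcast that triton_poi_fused_add_tanh_0 fuses: x0 = xindex % 4 walks att_hid_size and x2 = xindex // 64 walks the batch, so h2att(h) + bias is loaded once per (batch, channel) and reused across all 16 attention positions before the tanh.

import torch

att = torch.randn(4, 16, 4)    # p_att_feats viewed as (batch, att_size, att_hid_size)
att_h = torch.randn(4, 4)      # h2att(h): one projected vector per batch element
# buf1 holds tanh(att + att_h broadcast over the att_size dimension).
dot = torch.tanh(att + att_h.unsqueeze(1).expand_as(att))
assert dot.shape == (4, 16, 4)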
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import * class Attention(nn.Module): def __init__(self, opt): super(Attention, self).__init__() self.rnn_size = opt.rnn_size self.att_hid_size = opt.att_hid_size self.h2att = nn.Linear(self.rnn_size, self.att_hid_size) self.alpha_net = nn.Linear(self.att_hid_size, 1) def forward(self, h, att_feats, p_att_feats, att_masks=None): att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1) att = p_att_feats.view(-1, att_size, self.att_hid_size) att_h = self.h2att(h) att_h = att_h.unsqueeze(1).expand_as(att) dot = att + att_h dot = torch.tanh(dot) dot = dot.view(-1, self.att_hid_size) dot = self.alpha_net(dot) dot = dot.view(-1, att_size) weight = F.softmax(dot, dim=1) if att_masks is not None: weight = weight * att_masks.view(-1, att_size).float() weight = weight / weight.sum(1, keepdim=True) att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) return att_res def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(rnn_size=4, att_hid_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(out_ptr0 + x3, tmp5, xmask) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_5, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(256)](primals_2, buf0, primals_4, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_4 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_per_fused__softmax_1[grid(4)](buf3, buf4, buf5, buf6, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) buf7 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0) del buf0 
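        # buf0 (the h2att projection scratch) is dead once the tanh fusion has
        # consumed it, so its storage is reinterpreted as the (4, 1, 4) bmm
        # output rather than allocating a new buffer -- the raw dump above
        # marks this with "# reuse".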
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 16), (16, 0, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 4, 1), 0), out=buf7) del buf6 return reinterpret_tensor(buf7, (4, 4), (4, 1), 0 ), primals_5, buf1, buf3, buf4, buf5, reinterpret_tensor(primals_1, (4, 4, 16), (64, 1, 4), 0), primals_6 class AttentionNew(nn.Module): def __init__(self, opt): super(AttentionNew, self).__init__() self.rnn_size = opt.rnn_size self.att_hid_size = opt.att_hid_size self.h2att = nn.Linear(self.rnn_size, self.att_hid_size) self.alpha_net = nn.Linear(self.att_hid_size, 1) def forward(self, input_0, input_1, input_2): primals_3 = self.h2att.weight primals_4 = self.h2att.bias primals_6 = self.alpha_net.weight primals_7 = self.alpha_net.bias primals_5 = input_0 primals_1 = input_1 primals_2 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
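The persistent-reduction kernel triton_per_fused__softmax_1 is the standard numerically stable softmax over dim=1, with the row max and row sum (buf4, buf5) kept alive for the backward pass. An eager equivalent for the (4, 16) logits, shown as an illustrative sketch rather than repo code:

import torch

dot = torch.randn(4, 16)              # buf3 viewed as (-1, att_size)
m = dot.amax(dim=1, keepdim=True)     # tmp4, written to out_ptr0 (buf4)
e = (dot - m).exp()                   # tmp6: shifting by the max avoids overflow
s = e.sum(dim=1, keepdim=True)        # tmp10, written to out_ptr1 (buf5)
weight = e / s                        # tmp11, written to out_ptr2 (buf6)
assert torch.allclose(weight, torch.softmax(dot, dim=1))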
VISLANG-Lab/MGCL
Attention
false
1,893
[ "MIT" ]
0
22da06ffa7410d9632bfda8eefb1b79e4f660de0
https://github.com/VISLANG-Lab/MGCL/tree/22da06ffa7410d9632bfda8eefb1b79e4f660de0
from _paritybench_helpers import _mock_config import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import * class Model(nn.Module): def __init__(self, opt): super().__init__() self.rnn_size = opt.rnn_size self.att_hid_size = opt.att_hid_size self.h2att = nn.Linear(self.rnn_size, self.att_hid_size) self.alpha_net = nn.Linear(self.att_hid_size, 1) def forward(self, h, att_feats, p_att_feats, att_masks=None): att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1) att = p_att_feats.view(-1, att_size, self.att_hid_size) att_h = self.h2att(h) att_h = att_h.unsqueeze(1).expand_as(att) dot = att + att_h dot = torch.tanh(dot) dot = dot.view(-1, self.att_hid_size) dot = self.alpha_net(dot) dot = dot.view(-1, att_size) weight = F.softmax(dot, dim=1) if att_masks is not None: weight = weight * att_masks.view(-1, att_size).float() weight = weight / weight.sum(1, keepdim=True) att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) return att_res def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
PositionWiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/6s/c6shmuvjmq6zc4ifvdsynorwri47ra63qxa7jg3e7p6lw6xlqj5q.py # Topologically Sorted Source Nodes: [mul, truediv, erf, add, mul_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] # Source node to ATen node mapping: # add => add # erf => erf # mul => mul # mul_1 => mul_1 # truediv => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) triton_poi_fused_add_div_erf_mul_0 = async_compile.triton('triton_poi_fused_add_div_erf_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, truediv, erf, add, mul_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_div_erf_mul_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn def gelu(x): """ gelu activation function """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class PositionWiseFeedForward(nn.Module): """ feedForward neural networks for each position """ def __init__(self, cfg): super().__init__() self.fc1 = nn.Linear(cfg.hidden_dim, cfg.feedforward_dim) self.fc2 = nn.Linear(cfg.feedforward_dim, cfg.hidden_dim) def forward(self, x): return self.fc2(gelu(self.fc1(x))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(hidden_dim=4, feedforward_dim=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_erf_mul_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4 def gelu(x): """ gelu activation function """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class PositionWiseFeedForwardNew(nn.Module): """ feedForward neural networks for each position """ def __init__(self, cfg): super().__init__() self.fc1 = nn.Linear(cfg.hidden_dim, cfg.feedforward_dim) self.fc2 = nn.Linear(cfg.feedforward_dim, cfg.hidden_dim) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
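Both extern_kernels.addmm calls in call() above reproduce nn.Linear: reinterpret_tensor of the (4, 4) weight with strides (1, 4) is a zero-copy transpose, so each call computes beta * bias + alpha * (input @ weight.T). A small stand-alone sketch of the equivalence (illustrative values, not from the repo):

import torch
import torch.nn.functional as F

inp = torch.randn(64, 4)
weight = torch.randn(4, 4)
bias = torch.randn(4)
# addmm(bias, inp, weight.t()) with alpha=beta=1 is exactly F.linear(inp, weight, bias).
out = torch.addmm(bias, inp, weight.t(), beta=1, alpha=1)
assert torch.allclose(out, F.linear(inp, weight, bias))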
Kyumin-Park/Protein-Chemical-Releativity-BERT
PositionWiseFeedForward
false
1,894
[ "MIT" ]
0
6a339f4e640d99199f38a00769f5872c2a53ac55
https://github.com/Kyumin-Park/Protein-Chemical-Releativity-BERT/tree/6a339f4e640d99199f38a00769f5872c2a53ac55
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn def gelu(x): """ gelu activation function """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class Model(nn.Module): """ feedForward neural networks for each position """ def __init__(self, cfg): super().__init__() self.fc1 = nn.Linear(cfg.hidden_dim, cfg.feedforward_dim) self.fc2 = nn.Linear(cfg.feedforward_dim, cfg.hidden_dim) def forward(self, x): return self.fc2(gelu(self.fc1(x))) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
Autoencoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/36/c36goqekbheqmzqx63ibehvw5xzi6nve5f33bertb3dmpfgep4fh.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) 
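    # NOTE (annotation; an assumption about Inductor's low-memory max-pool
    # lowering rather than text from the generated dump): tmp6 above is the
    # max over the 2x2 pooling window, and tmp16 below encodes the argmax as
    # an int8 offset within that window (0 = top-left, 1 = top-right,
    # 2 = bottom-left, 3 = bottom-right), which the max_pool2d backward uses
    # to route gradients.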
tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/j6/cj6faeofhfnxsh5iuwazughjlau4igyajnmvjequyelq7apzs4qm.py # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/6y/c6yx6oq7oo2cwoaop3iwu5iqfdckg6lycdtu4jjuiv3wdcf2o6p7.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_3 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/nf/cnfhq3s5fm35zutth7czcwttunipax3q5z32n24vacugmoz6fkax.py # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # interpolate => add, add_1, convert_element_type, convert_element_type_1, iota, mul, mul_1 # Graph fragment: # %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add, torch.float32), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 
0.5), kwargs = {}) # %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_1, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_4 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/np/cnpgu3r4fprt77zrak6visjrmr7uuamtqq36kiy6wnudec7hc3j6.py # Topologically Sorted Source Nodes: [x_4, interpolate, x_5], Original ATen: [aten.convolution, aten._unsafe_index, aten.relu] # Source node to ATen node mapping: # interpolate => _unsafe_index # x_4 => convolution_2 # x_5 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_2, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%_unsafe_index,), kwargs = {}) triton_poi_fused__unsafe_index_convolution_relu_5 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 
'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 32) % 32 x0 = xindex % 32 x5 = (xindex // 1024) x2 = (xindex // 1024) % 32 x6 = xindex tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (16*tmp4) + (256*x5)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x6), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/7u/c7ucpk25ezixug3p62h35v6iuoi627xxx3ibr6zengot5lxhswl6.py # Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] # Source node to ATen node mapping: # interpolate_1 => add_4, add_5, convert_element_type_4, convert_element_type_5, iota_2, mul_4, mul_5 # Graph fragment: # %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_2, 1), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 0), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_4, torch.float32), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.5), kwargs = {}) # %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_5, torch.int64), kwargs = {}) triton_poi_fused__to_copy_add_arange_mul_6 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_6(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/lv/clv5p5bw7kevxyriy5udscthz7oc7poatiwopodimqsbgd2tvcj3.py # Topologically Sorted Source Nodes: [x_6, interpolate_1, x_7], Original ATen: [aten.convolution, aten._unsafe_index, aten.relu] # Source node to ATen node mapping: # interpolate_1 => _unsafe_index_1 # x_6 => convolution_3 # x_7 => relu_3 # Graph fragment: # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution_3, [None, None, %unsqueeze_1, %convert_element_type_5]), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%_unsafe_index_1,), kwargs = {}) triton_poi_fused__unsafe_index_convolution_relu_7 = async_compile.triton('triton_poi_fused__unsafe_index_convolution_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_convolution_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 
'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 64) % 64 x0 = xindex % 64 x5 = (xindex // 4096) x2 = (xindex // 4096) % 32 x6 = xindex tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + (32*tmp4) + (1024*x5)), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x6), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/jn/cjnns442wqahvj2npqd4cst7zysu23dbtmnkvsx4mt4onaexwnoo.py # Topologically Sorted Source Nodes: [conv2d_4, x_8], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # x_8 => sigmoid # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_sigmoid_8 = async_compile.triton('triton_poi_fused_convolution_sigmoid_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (32, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (32, ), (1, )) assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32, ), (1, )) assert_size_stride(primals_10, (1, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 131072, grid=grid(131072), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 32768, grid=grid(32768), stream=stream0) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 32, 16, 16), (8192, 256, 16, 1)) buf9 = 
empty_strided_cuda((32, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_4.run(buf9, 32, grid=grid(32), stream=stream0) buf10 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4, interpolate, x_5], Original ATen: [aten.convolution, aten._unsafe_index, aten.relu] triton_poi_fused__unsafe_index_convolution_relu_5.run(buf9, buf8, primals_7, buf10, 131072, grid=grid(131072), stream=stream0) del buf8 del primals_7 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf12 = empty_strided_cuda((64, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [interpolate_1], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy] triton_poi_fused__to_copy_add_arange_mul_6.run(buf12, 64, grid=grid(64), stream=stream0) buf13 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6, interpolate_1, x_7], Original ATen: [aten.convolution, aten._unsafe_index, aten.relu] triton_poi_fused__unsafe_index_convolution_relu_7.run(buf12, buf11, primals_9, buf13, 524288, grid=grid(524288), stream=stream0) del buf11 del primals_9 # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [conv2d_4, x_8], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_8.run(buf15, primals_11, 16384, grid=grid(16384), stream=stream0) del primals_11 return (buf15, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf12, buf13, buf15, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((1, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, 
times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
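# A minimal reference sketch (assumed, not part of the generated module): the
# extern_kernels.convolution calls above run with bias=None, and kernels such as
# triton_poi_fused_convolution_relu_0 fuse the per-channel bias add and the ReLU into a
# single elementwise pass over the conv output. An eager-mode equivalent of that epilogue:
import torch
import torch.nn.functional as F

def conv_bias_relu_reference(x, weight, bias):
    y = F.conv2d(x, weight, bias=None, stride=1, padding=1)  # extern convolution, no bias
    return torch.relu(y + bias.view(1, -1, 1, 1))            # fused bias add + ReLU epilogue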
from torch.nn import Module import torch import torch.nn.functional as F import torch.optim as optim import torch as T from torch.nn import Conv2d from torch.nn import MaxPool2d from torch.nn import BCELoss class Autoencoder(Module): def __init__(self, lr=0.002): super(Autoencoder, self).__init__() self.encode1 = Conv2d(1, 32, 3, padding=1) self.encode2 = Conv2d(32, 32, 3, padding=1) self.pool = MaxPool2d(2, 2) self.decode1 = Conv2d(32, 32, 3, padding=1) self.decode2 = Conv2d(32, 32, 3, padding=1) self.output = Conv2d(32, 1, 3, padding=1) self.loss = BCELoss() self.optimizer = optim.Adadelta(self.parameters(), lr=lr) self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu:0') def forward(self, inputs): x = F.relu(self.encode1(inputs)) x = self.pool(x) x = F.relu(self.encode2(x)) x = self.pool(x) x = self.decode1(x) x = F.relu(F.interpolate(x, scale_factor=2, mode='nearest')) x = self.decode2(x) x = F.relu(F.interpolate(x, scale_factor=2, mode='nearest')) x = F.sigmoid(self.output(x)) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
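# A minimal usage sketch for the eager model above (assumed harness, not from the source):
# the two 2x2 max-pools take 64x64 feature maps down to 16x16, and the two scale-factor-2
# nearest interpolations bring them back, so the sigmoid output matches the input shape.
model = Autoencoder()
x, = get_inputs()
with torch.no_grad():
    y = model(x)
assert y.shape == x.shape                                # (4, 1, 64, 64)
assert float(y.min()) >= 0.0 and float(y.max()) <= 1.0   # sigmoid keeps values in [0, 1]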
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module import torch.optim as optim import torch as T from torch.nn import Conv2d from torch.nn import MaxPool2d from torch.nn import BCELoss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 
= tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 32 % 32 x0 = xindex % 32 x5 = xindex // 1024 x2 = xindex // 1024 % 32 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 16, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x6, tmp13, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_6(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 64 x0 = xindex % 64 x5 = xindex // 4096 x2 = xindex // 4096 % 32 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x6, tmp13, None) @triton.jit def triton_poi_fused_convolution_sigmoid_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) 
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (1, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_11, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) buf3 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(131072)](buf1, buf2, buf3, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_5, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6, buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 32, 16, 16), (8192, 256, 16, 1)) buf9 = empty_strided_cuda((32,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_4[grid(32)](buf9, 32, XBLOCK=32, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_relu_5[grid(131072)](buf9, buf8, primals_7, buf10, 131072, XBLOCK=512, num_warps=8, num_stages=1) del buf8 del primals_7 buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 32, 32, 32), (32768, 1024, 32, 1)) buf12 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_6[grid(64)](buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_relu_7[grid(524288)](buf12, buf11, primals_9, buf13, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del buf11 del primals_9 buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_sigmoid_8[grid(16384)](buf15, primals_11, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 return (buf15, primals_1, primals_3, primals_4, 
primals_6, primals_8, primals_10, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf12, buf13, buf15) class AutoencoderNew(Module): def __init__(self, lr=0.002): super(AutoencoderNew, self).__init__() self.encode1 = Conv2d(1, 32, 3, padding=1) self.encode2 = Conv2d(32, 32, 3, padding=1) self.pool = MaxPool2d(2, 2) self.decode1 = Conv2d(32, 32, 3, padding=1) self.decode2 = Conv2d(32, 32, 3, padding=1) self.output = Conv2d(32, 1, 3, padding=1) self.loss = BCELoss() self.optimizer = optim.Adadelta(self.parameters(), lr=lr) self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu:0') def forward(self, input_0): primals_1 = self.encode1.weight primals_2 = self.encode1.bias primals_4 = self.encode2.weight primals_5 = self.encode2.bias primals_6 = self.decode1.weight primals_7 = self.decode1.bias primals_8 = self.decode2.weight primals_9 = self.decode2.bias primals_10 = self.output.weight primals_11 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
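# A sanity-check sketch (assumed, not part of the source): Autoencoder and AutoencoderNew
# register their parameters under the same attribute names, so a state_dict copy lets the
# compiled call() path be compared against eager execution on CUDA.
if T.cuda.is_available():
    eager = Autoencoder().to('cuda')
    compiled = AutoencoderNew().to('cuda')
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 1, 64, 64, device='cuda')
    with torch.no_grad():
        ref = eager(x)
        out = compiled(x)
    assert torch.allclose(ref, out, atol=1e-5)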
ShivangMathur1/Small-Pytorch-Projects
Autoencoder
false
1895
[ "MIT" ]
0
aebc6e81103fe2a6830caeedc1b17227e211a6e5
https://github.com/ShivangMathur1/Small-Pytorch-Projects/tree/aebc6e81103fe2a6830caeedc1b17227e211a6e5
from torch.nn import Module import torch import torch.nn.functional as F import torch.optim as optim import torch as T from torch.nn import Conv2d from torch.nn import MaxPool2d from torch.nn import BCELoss class Model(Module): def __init__(self, lr=0.002): super().__init__() self.encode1 = Conv2d(1, 32, 3, padding=1) self.encode2 = Conv2d(32, 32, 3, padding=1) self.pool = MaxPool2d(2, 2) self.decode1 = Conv2d(32, 32, 3, padding=1) self.decode2 = Conv2d(32, 32, 3, padding=1) self.output = Conv2d(32, 1, 3, padding=1) self.loss = BCELoss() self.optimizer = optim.Adadelta(self.parameters(), lr=lr) self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu:0') def forward(self, inputs): x = F.relu(self.encode1(inputs)) x = self.pool(x) x = F.relu(self.encode2(x)) x = self.pool(x) x = self.decode1(x) x = F.relu(F.interpolate(x, scale_factor=2, mode='nearest')) x = self.decode2(x) x = F.relu(F.interpolate(x, scale_factor=2, mode='nearest')) x = F.sigmoid(self.output(x)) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return []
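# A sketch of the nearest-neighbour upsampling performed by the _unsafe_index kernels
# (assumed equivalence, spelled out for clarity): triton_poi_fused__to_copy_add_arange_mul_4/6
# build integer source indices as trunc(arange(out_size) * 0.5), and the fused kernels
# gather rows and columns with them. The eager counterpart:
import torch
import torch.nn.functional as F

idx = (torch.arange(64).to(torch.float32) * 0.5).to(torch.int64)  # same math as the index kernels
x = torch.rand(4, 32, 32, 32)
up = x[:, :, idx[:, None], idx[None, :]]                          # gather rows, then columns
assert torch.equal(up, F.interpolate(x, scale_factor=2, mode='nearest'))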
ColorJitterLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/k5/ck5gtelstphbmeyxj47cxu7yycqfyjvkdcsi2irtvkcdext2kdfh.py # Topologically Sorted Source Nodes: [hsv], Original ATen: [aten.stack] # Source node to ATen node mapping: # hsv => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%div, %sub_3, %getitem], 1), kwargs = {}) triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 12 x0 = xindex % 4 x2 = (xindex // 48) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 + x0 + (4*x1) + (64*x2)), 
tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr0 + (32 + x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 1.7320508075688772 tmp9 = tmp7 * tmp8 tmp10 = tl.load(in_ptr0 + (x0 + (4*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp11 = 2.0 tmp12 = tmp10 * tmp11 tmp13 = tmp12 - tmp5 tmp14 = tmp13 - tmp6 tmp15 = libdevice.atan2(tmp9, tmp14) tmp16 = 6.283185307179586 tmp17 = tmp15 % tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = tmp17 != tmp18 tmp20 = libdevice.signbit(tmp17) if (tmp17).dtype is tl.float32 else tmp17 < 0 tmp21 = libdevice.signbit(tmp16) if (tmp16).dtype is tl.float32 else tmp16 < 0 tmp22 = tmp20 != tmp21 tmp23 = tmp19 & tmp22 tmp24 = tmp17 + tmp16 tmp25 = tl.where(tmp23, tmp24, tmp17) tmp26 = 0.15915494309189535 tmp27 = tmp25 * tmp26 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp4, tmp27, tmp28) tmp30 = tmp0 >= tmp3 tmp31 = tl.full([1], 8, tl.int64) tmp32 = tmp0 < tmp31 tmp33 = tmp30 & tmp32 tmp34 = tl.load(in_ptr0 + (x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0) tmp35 = tl.load(in_ptr0 + (16 + x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0) tmp36 = triton_helpers.minimum(tmp34, tmp35) tmp37 = tl.load(in_ptr0 + (32 + x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0) tmp38 = triton_helpers.minimum(tmp36, tmp37) tmp39 = tl.load(in_ptr0 + (48 + x0 + (4*((-4) + x1)) + (64*x2)), tmp33 & xmask, other=0.0) tmp40 = triton_helpers.minimum(tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp34, tmp35) tmp42 = triton_helpers.maximum(tmp41, tmp37) tmp43 = triton_helpers.maximum(tmp42, tmp39) tmp44 = 1e-08 tmp45 = tmp43 + tmp44 tmp46 = tmp40 / tmp45 tmp47 = 1.0 tmp48 = tmp47 - tmp46 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp33, tmp48, tmp49) tmp51 = tmp0 >= tmp31 tmp52 = tl.full([1], 12, tl.int64) tmp53 = tmp0 < tmp52 tmp54 = tl.load(in_ptr0 + (x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0) tmp55 = tl.load(in_ptr0 + (16 + x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0) tmp56 = triton_helpers.maximum(tmp54, tmp55) tmp57 = tl.load(in_ptr0 + (32 + x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0) tmp58 = triton_helpers.maximum(tmp56, tmp57) tmp59 = tl.load(in_ptr0 + (48 + x0 + (4*((-8) + x1)) + (64*x2)), tmp51 & xmask, other=0.0) tmp60 = triton_helpers.maximum(tmp58, tmp59) tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype) tmp62 = tl.where(tmp51, tmp60, tmp61) tmp63 = tl.where(tmp33, tmp50, tmp62) tmp64 = tl.where(tmp4, tmp29, tmp63) tl.store(out_ptr0 + (x3), tmp64, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/gg/cggxgz5kv3f6v57dm57mpxkp77xbxjw2mh4mscq7a4rphyiqpca4.py # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] # Source node to ATen node mapping: # setitem => full_default_3, index_put # Graph fragment: # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %index_put : [num_users=4] = call_function[target=torch.ops.aten.index_put_.default](args = (%view, [%bitwise_not], %full_default_3), kwargs = {}) triton_poi_fused_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math 
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_1', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 == tmp0 tmp2 = tl_math.abs(tmp0) tmp3 = float("inf") tmp4 = tmp2 != tmp3 tmp5 = tmp1 & tmp4 tmp6 = tmp5 == 0 tmp7 = 0.0 tmp8 = tl.where(tmp6, tmp7, tmp0) tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tw/ctw7vqtn4wrdd6k3eojcn6wy2cqgcazcuexsxhfewjvf3xjv6ole.py # Topologically Sorted Source Nodes: [f_h], Original ATen: [aten.new_zeros] # Source node to ATen node mapping: # f_h => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 1], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_zeros_2 = async_compile.triton('triton_poi_fused_new_zeros_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_zeros_2(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/v4/cv46aopjpsjvzqshfrlrag6y4ifo6nlt2huynabmbhvbs437klh2.py # Topologically Sorted Source Nodes: [f_s], Original ATen: [aten.new_ones] # Source node to ATen node mapping: # f_s => full_default_1 # Graph fragment: # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 1], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_ones_3 = async_compile.triton('triton_poi_fused_new_ones_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_ones_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_ones_3(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 1.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/mq/cmqhtrxejw46pbgdo6fh4f7w3qkoe46hoelc5we53jnftjhog4ar.py # Topologically Sorted Source Nodes: [mul_2, truediv_2, h_1, h_2, setitem_1, mul_3, setitem_2], Original ATen: [aten.mul, aten.div, aten.add, aten.remainder, aten.copy] # Source node to ATen node mapping: # h_1 => add_1 # h_2 => remainder_1 # mul_2 => mul_3 # mul_3 => mul_4 # setitem_1 => copy # setitem_2 => copy_1 # truediv_2 => div_2 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%uniform, 255.0), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, 360.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_4, %div_2), kwargs = {}) # %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%select_int, %add_1, 2, 0, 9223372036854775807), kwargs = {}) # %select_scatter_default : [num_users=4] = 
call_function[target=torch.ops.aten.select_scatter.default](args = (%index_put, %slice_scatter_default, 1, 0), kwargs = {}) # %remainder_1 : [num_users=1] = call_function[target=torch.ops.aten.remainder.Scalar](args = (%select_6, 1), kwargs = {}) # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_8, %remainder_1), kwargs = {}) # %select_scatter_default_1 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %copy, 1, 0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_12, %uniform_1), kwargs = {}) # %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_14, %mul_4), kwargs = {}) # %select_scatter_default_2 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %copy_1, 1, 1), kwargs = {}) triton_poi_fused_add_copy_div_mul_remainder_4 = async_compile.triton('triton_poi_fused_add_copy_div_mul_remainder_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_copy_div_mul_remainder_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_copy_div_mul_remainder_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 3 x0 = xindex % 16 x2 = (xindex // 48) x3 = xindex tmp6 = tl.load(in_ptr0 + (x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (16 + x0 + (48*x2)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (x3), xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp1 == tmp3 tmp5 = tmp3 == tmp3 tmp8 = 255.0 tmp9 = tmp7 * tmp8 tmp10 = 0.002777777777777778 tmp11 = tmp9 * tmp10 tmp12 = tmp6 + tmp11 tmp13 = tl.where(tmp5, tmp12, tmp6) tmp14 = 1.0 tmp15 = tmp13 % tmp14 tmp16 = tmp15 != tmp3 tmp17 = libdevice.signbit(tmp15) if (tmp15).dtype is tl.float32 else 
tmp15 < 0 tmp18 = libdevice.signbit(tmp14) if (tmp14).dtype is tl.float32 else tmp14 < 0 tmp19 = tmp17 != tmp18 tmp20 = tmp16 & tmp19 tmp21 = tmp15 + tmp14 tmp22 = tl.where(tmp20, tmp21, tmp15) tmp24 = tl.where(tmp4, tmp12, tmp23) tmp25 = tl.where(tmp4, tmp22, tmp24) tmp27 = tmp25 * tmp26 tmp28 = tmp0 == tmp3 tmp30 = tl.where(tmp28, tmp12, tmp29) tmp31 = tl.where(tmp28, tmp22, tmp30) tmp32 = tl.where(tmp2, tmp27, tmp31) tl.store(out_ptr0 + (x3), tmp32, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/zq/czq45ck4e3bfpgidquurvbk77tkggknyyrok7iefanwqczot3sts.py # Topologically Sorted Source Nodes: [mul_4, setitem_3, x, v, s, c, h_3, mul_6, add_1, k, sub_4, t, t_1, mul_7, x_1, means, sub_6, mul_8, x_2, inputs], Original ATen: [aten.mul, aten.copy, aten.clamp, aten.index, aten.add, aten.remainder, aten.rsub, aten.minimum, aten.sub, aten.mean] # Source node to ATen node mapping: # add_1 => add_2 # c => mul_6 # h_3 => index # inputs => clamp_max_2, clamp_min_2 # k => remainder_2 # means => mean # mul_4 => mul_5 # mul_6 => mul_7 # mul_7 => mul_8 # mul_8 => mul_9 # s => index_1 # setitem_3 => copy_2 # sub_4 => sub_4 # sub_6 => sub_6 # t => minimum # t_1 => clamp_max_1, clamp_min_1 # v => index_2 # x => clamp_max, clamp_min # x_1 => sub_5 # x_2 => add_3 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_18, %uniform_2), kwargs = {}) # %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_20, %mul_5), kwargs = {}) # %select_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_2, %copy_2, 1, 2), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%select_scatter_default_3, 0), kwargs = {}) # %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {}) # %index_2 : [num_users=2] = call_function[target=torch.ops.aten.index.Tensor](args = (%clamp_max, [None, %full_default_6]), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%clamp_max, [None, %full_default_5]), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index_2, %index_1), kwargs = {}) # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%clamp_max, [None, %full_default_4]), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, 6), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_7), kwargs = {}) # %remainder_2 : [num_users=2] = call_function[target=torch.ops.aten.remainder.Scalar](args = (%add_2, 6), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4.0, %remainder_2), kwargs = {}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%remainder_2, %sub_4), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%minimum, 0), kwargs = {}) # %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %clamp_max_1), kwargs = {}) # %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = 
(%index_2, %mul_8), kwargs = {}) # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%sub_5, [2, 3], True), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_5, %mean), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %uniform_3), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mean), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_3, 0), kwargs = {}) # %clamp_max_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1), kwargs = {}) triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5 = async_compile.triton('triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5(in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 12 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 3 r2 = rindex x1 = (xindex // 3) x3 = xindex tmp13 = tl.load(in_ptr0 + (32 + r2 + (48*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (r2 + (48*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp42 = tl.load(in_ptr0 + (16 + r2 + (48*x1)), xmask, eviction_policy='evict_last', other=0.0) tmp57 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1, 1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1, 1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 3.0 tmp6 = 1.0 tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = 5.0 tmp9 = tl.where(tmp2, tmp8, tmp7) tmp10 = 
tl.full([1, 1], 0, tl.int32) tmp11 = tl.full([1, 1], 2, tl.int32) tmp12 = tmp10 == tmp11 tmp15 = tmp13 * tmp14 tmp17 = tl.where(tmp12, tmp15, tmp16) tmp18 = 0.0 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = triton_helpers.minimum(tmp19, tmp6) tmp21 = 6.0 tmp22 = tmp20 * tmp21 tmp23 = tmp9 + tmp22 tmp24 = tmp23 % tmp21 tmp25 = tmp24 != tmp10 tmp26 = libdevice.signbit(tmp24) if (tmp24).dtype is tl.float32 else tmp24 < 0 tmp27 = libdevice.signbit(tmp21) if (tmp21).dtype is tl.float32 else tmp21 < 0 tmp28 = tmp26 != tmp27 tmp29 = tmp25 & tmp28 tmp30 = tmp24 + tmp21 tmp31 = tl.where(tmp29, tmp30, tmp24) tmp32 = 4.0 tmp33 = tmp32 - tmp31 tmp34 = triton_helpers.minimum(tmp31, tmp33) tmp35 = triton_helpers.maximum(tmp34, tmp18) tmp36 = tmp11 == tmp11 tmp37 = tl.where(tmp36, tmp15, tmp13) tmp38 = triton_helpers.maximum(tmp37, tmp18) tmp39 = triton_helpers.minimum(tmp38, tmp6) tmp40 = tl.full([1, 1], 1, tl.int32) tmp41 = tmp40 == tmp11 tmp43 = tl.where(tmp41, tmp15, tmp42) tmp44 = triton_helpers.maximum(tmp43, tmp18) tmp45 = triton_helpers.minimum(tmp44, tmp6) tmp46 = tmp39 * tmp45 tmp47 = triton_helpers.minimum(tmp35, tmp6) tmp48 = tmp46 * tmp47 tmp49 = tmp39 - tmp48 tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK]) tmp52 = tl.where(xmask, tmp50, 0) tmp53 = tl.sum(tmp52, 1)[:, None] tmp54 = 16.0 tmp55 = tmp53 / tmp54 tmp56 = tmp49 - tmp55 tmp58 = tmp56 * tmp57 tmp59 = tmp58 + tmp55 tmp60 = triton_helpers.maximum(tmp59, tmp18) tmp61 = triton_helpers.minimum(tmp60, tmp6) tl.store(out_ptr1 + (r2 + (16*x3)), tmp61, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12, 4), (48, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [hsv], Original ATen: [aten.stack] stream0 = get_raw_stream(0) triton_poi_fused_stack_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0) del arg0_1 # Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put] triton_poi_fused_index_put_lift_fresh_1.run(buf0, buf0, 192, grid=grid(192), stream=stream0) buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [f_h], Original ATen: [aten.new_zeros] triton_poi_fused_new_zeros_2.run(buf7, 4, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [f_h, uniform_], Original ATen: [aten.new_zeros, aten.uniform] buf8 = torch.ops.aten.uniform.default(buf7, -4.0, 4.0) buf9 = buf8 del buf8 buf10 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [f_s], Original ATen: [aten.new_ones] triton_poi_fused_new_ones_3.run(buf10, 4, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [f_s, f_s_1], Original ATen: [aten.new_ones, aten.uniform] buf11 = torch.ops.aten.uniform.default(buf10, 0.0, 5.0) del buf10 buf12 = buf11 del buf11 buf13 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_2, truediv_2, h_1, h_2, setitem_1, mul_3, setitem_2], Original ATen: [aten.mul, aten.div, aten.add, aten.remainder, aten.copy] triton_poi_fused_add_copy_div_mul_remainder_4.run(buf0, buf9, buf12, buf13, 192, grid=grid(192), stream=stream0) del buf12 buf14 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [f_v], Original ATen: [aten.new_ones] triton_poi_fused_new_ones_3.run(buf14, 4, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [f_v, 
f_v_1], Original ATen: [aten.new_ones, aten.uniform] buf15 = torch.ops.aten.uniform.default(buf14, 0.0, 5.0) buf16 = buf15 del buf15 buf20 = reinterpret_tensor(buf14, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf14 # reuse # Topologically Sorted Source Nodes: [factor], Original ATen: [aten.uniform] buf21 = torch.ops.aten.uniform.default(buf20, 0.0, 5.0) del buf20 buf22 = buf21 del buf21 buf23 = reinterpret_tensor(buf0, (4, 3, 4, 4), (48, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [mul_4, setitem_3, x, v, s, c, h_3, mul_6, add_1, k, sub_4, t, t_1, mul_7, x_1, means, sub_6, mul_8, x_2, inputs], Original ATen: [aten.mul, aten.copy, aten.clamp, aten.index, aten.add, aten.remainder, aten.rsub, aten.minimum, aten.sub, aten.mean] triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5.run(buf13, buf16, buf22, buf23, 12, 16, grid=grid(12), stream=stream0) del buf13 del buf16 del buf22 return (buf23, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
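# A note on a recurring pattern in the kernels above (a scalar sketch, not generated code):
# Triton's % is a truncated remainder whose sign follows the dividend, while the source uses
# Python/PyTorch floored modulo. The generated code therefore checks whether the remainder
# and the divisor disagree in sign and, if so, folds the divisor back in:
import math

def floored_mod(a: float, m: float) -> float:
    r = math.fmod(a, m)                      # truncated remainder, like Triton's %
    if r != 0.0 and (r < 0.0) != (m < 0.0):  # signs disagree -> off by one period
        r += m
    return r

assert floored_mod(-0.25, 1.0) == 0.75       # hue wrap-around, matching h % 1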
from torch.autograd import Function import math import numbers import torch import numpy as np import torch.nn as nn import torch.utils.cpp_extension def hsv2rgb(hsv): """Convert a 4-d HSV tensor to the RGB counterpart. >>> %timeit hsv2rgb_lookup(hsv) 2.37 ms ± 13.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) >>> %timeit hsv2rgb(hsv) 298 µs ± 542 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> torch.allclose(hsv2rgb(hsv), hsv2rgb_lookup(hsv), atol=1e-6) True References [1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative """ h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]] c = v * s n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1) k = (n + h * 6) % 6 t = torch.min(k, 4.0 - k) t = torch.clamp(t, 0, 1) return v - c * t def rgb2hsv(rgb): """Convert a 4-d RGB tensor to the HSV counterpart. Here, we compute hue using atan2() based on the definition in [1], instead of using the common lookup table approach as in [2, 3]. Those values agree when the angle is a multiple of 30°, otherwise they may differ at most ~1.2°. >>> %timeit rgb2hsv_lookup(rgb) 1.07 ms ± 2.96 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> %timeit rgb2hsv(rgb) 380 µs ± 555 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> (rgb2hsv_lookup(rgb) - rgb2hsv(rgb)).abs().max() tensor(0.0031, device='cuda:0') References [1] https://en.wikipedia.org/wiki/Hue [2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html [3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212 """ r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :] Cmax = rgb.max(1)[0] Cmin = rgb.min(1)[0] hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b) hue = hue % (2 * math.pi) / (2 * math.pi) saturate = 1 - Cmin / (Cmax + 1e-08) value = Cmax hsv = torch.stack([hue, saturate, value], dim=1) hsv[~torch.isfinite(hsv)] = 0.0 return hsv class RandomHSVFunction(Function): @staticmethod def forward(ctx, x, f_h, f_s, f_v): x = rgb2hsv(x) h = x[:, 0, :, :] h += f_h * 255.0 / 360.0 h = h % 1 x[:, 0, :, :] = h x[:, 1, :, :] = x[:, 1, :, :] * f_s x[:, 2, :, :] = x[:, 2, :, :] * f_v x = torch.clamp(x, 0, 1) x = hsv2rgb(x) return x @staticmethod def backward(ctx, grad_output): grad_input = None if ctx.needs_input_grad[0]: grad_input = grad_output.clone() return grad_input, None, None, None class ColorJitterLayer(nn.Module): def __init__(self, brightness, contrast, saturation, hue): super(ColorJitterLayer, self).__init__() self.brightness = self._check_input(brightness, 'brightness') self.contrast = self._check_input(contrast, 'contrast') self.saturation = self._check_input(saturation, 'saturation') self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5 ), clip_first_on_zero=False) def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True): if isinstance(value, numbers.Number): if value < 0: raise ValueError( 'If {} is a single number, it must be non-negative.'. format(name)) value = [center - value, center + value] if clip_first_on_zero: value[0] = max(value[0], 0) elif isinstance(value, (tuple, list)) and len(value) == 2: if not bound[0] <= value[0] <= value[1] <= bound[1]: raise ValueError('{} values should be between {}'.format( name, bound)) else: raise TypeError( '{} should be a single number or a list/tuple with length 2.'
.format(name)) if value[0] == value[1] == center: value = None return value def adjust_contrast(self, x): if self.contrast: factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast) means = torch.mean(x, dim=[2, 3], keepdim=True) x = (x - means) * factor + means return torch.clamp(x, 0, 1) def adjust_hsv(self, x): f_h = x.new_zeros(x.size(0), 1, 1) f_s = x.new_ones(x.size(0), 1, 1) f_v = x.new_ones(x.size(0), 1, 1) if self.hue: f_h.uniform_(*self.hue) if self.saturation: f_s = f_s.uniform_(*self.saturation) if self.brightness: f_v = f_v.uniform_(*self.brightness) return RandomHSVFunction.apply(x, f_h, f_s, f_v) def transform(self, inputs): if np.random.rand() > 0.5: transforms = [self.adjust_contrast, self.adjust_hsv] else: transforms = [self.adjust_hsv, self.adjust_contrast] for t in transforms: inputs = t(inputs) return inputs def forward(self, inputs): return self.transform(inputs) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'brightness': 4, 'contrast': 4, 'saturation': 4, 'hue': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch.autograd import Function import math import numbers import numpy as np import torch.nn as nn import torch.utils.cpp_extension assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 12 x0 = xindex % 4 x2 = xindex // 48 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr0 + (32 + x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp7 = tmp5 - tmp6 tmp8 = 1.7320508075688772 tmp9 = tmp7 * tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp11 = 2.0 tmp12 = tmp10 * tmp11 tmp13 = tmp12 - tmp5 tmp14 = tmp13 - tmp6 tmp15 = libdevice.atan2(tmp9, tmp14) tmp16 = 6.283185307179586 tmp17 = tmp15 % tmp16 tmp18 = tl.full([1], 0, tl.int32) tmp19 = tmp17 != tmp18 tmp20 = libdevice.signbit(tmp17 ) if tmp17.dtype is tl.float32 else tmp17 < 0 tmp21 = libdevice.signbit(tmp16 ) if tmp16.dtype is tl.float32 else tmp16 < 0 tmp22 = tmp20 != tmp21 tmp23 = tmp19 & tmp22 tmp24 = tmp17 + tmp16 tmp25 = tl.where(tmp23, tmp24, tmp17) tmp26 = 0.15915494309189535 tmp27 = tmp25 * tmp26 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp4, tmp27, tmp28) tmp30 = tmp0 >= tmp3 tmp31 = tl.full([1], 8, tl.int64) tmp32 = tmp0 < tmp31 tmp33 = tmp30 & tmp32 tmp34 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 64 * x2), tmp33 & xmask, other=0.0) tmp35 = tl.load(in_ptr0 + (16 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 & xmask, other=0.0) tmp36 = triton_helpers.minimum(tmp34, tmp35) tmp37 = tl.load(in_ptr0 + (32 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 & xmask, other=0.0) tmp38 = triton_helpers.minimum(tmp36, tmp37) tmp39 = tl.load(in_ptr0 + (48 + x0 + 4 * (-4 + x1) + 64 * x2), tmp33 & xmask, other=0.0) tmp40 = triton_helpers.minimum(tmp38, tmp39) tmp41 = triton_helpers.maximum(tmp34, tmp35) tmp42 = triton_helpers.maximum(tmp41, tmp37) tmp43 = triton_helpers.maximum(tmp42, tmp39) tmp44 = 1e-08 tmp45 = tmp43 + tmp44 tmp46 = tmp40 / tmp45 tmp47 = 1.0 tmp48 = tmp47 - tmp46 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp33, tmp48, tmp49) tmp51 = tmp0 >= tmp31 tl.full([1], 12, tl.int64) tmp54 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 64 * x2), tmp51 & xmask, other=0.0) tmp55 = tl.load(in_ptr0 + (16 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 & xmask, other=0.0) tmp56 = triton_helpers.maximum(tmp54, tmp55) tmp57 = tl.load(in_ptr0 + (32 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 & xmask, other=0.0) tmp58 = triton_helpers.maximum(tmp56, tmp57) tmp59 = tl.load(in_ptr0 + (48 + x0 + 4 * (-8 + x1) + 64 * x2), tmp51 & xmask, other=0.0) tmp60 = triton_helpers.maximum(tmp58, tmp59) tmp61 = tl.full(tmp60.shape, 0.0, tmp60.dtype) tmp62 = tl.where(tmp51, tmp60, tmp61) tmp63 = tl.where(tmp33, tmp50, tmp62) tmp64 = tl.where(tmp4, tmp29, tmp63) tl.store(out_ptr0 + x3, tmp64, xmask) 
@triton.jit def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 == tmp0 tmp2 = tl_math.abs(tmp0) tmp3 = float('inf') tmp4 = tmp2 != tmp3 tmp5 = tmp1 & tmp4 tmp6 = tmp5 == 0 tmp7 = 0.0 tmp8 = tl.where(tmp6, tmp7, tmp0) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_new_zeros_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_new_ones_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 1.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_copy_div_mul_remainder_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 3 x0 = xindex % 16 x2 = xindex // 48 x3 = xindex tmp6 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + x3, xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = tmp1 == tmp3 tmp5 = tmp3 == tmp3 tmp8 = 255.0 tmp9 = tmp7 * tmp8 tmp10 = 0.002777777777777778 tmp11 = tmp9 * tmp10 tmp12 = tmp6 + tmp11 tmp13 = tl.where(tmp5, tmp12, tmp6) tmp14 = 1.0 tmp15 = tmp13 % tmp14 tmp16 = tmp15 != tmp3 tmp17 = libdevice.signbit(tmp15 ) if tmp15.dtype is tl.float32 else tmp15 < 0 tmp18 = libdevice.signbit(tmp14 ) if tmp14.dtype is tl.float32 else tmp14 < 0 tmp19 = tmp17 != tmp18 tmp20 = tmp16 & tmp19 tmp21 = tmp15 + tmp14 tmp22 = tl.where(tmp20, tmp21, tmp15) tmp24 = tl.where(tmp4, tmp12, tmp23) tmp25 = tl.where(tmp4, tmp22, tmp24) tmp27 = tmp25 * tmp26 tmp28 = tmp0 == tmp3 tmp30 = tl.where(tmp28, tmp12, tmp29) tmp31 = tl.where(tmp28, tmp22, tmp30) tmp32 = tl.where(tmp2, tmp27, tmp31) tl.store(out_ptr0 + x3, tmp32, xmask) @triton.jit def triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5( in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 12 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 3 r2 = rindex x1 = xindex // 3 x3 = xindex tmp13 = tl.load(in_ptr0 + (32 + r2 + 48 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (r2 + 48 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp42 = tl.load(in_ptr0 + (16 + r2 + 48 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp57 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1, 1], 1, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1, 1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 3.0 tmp6 = 1.0 tmp7 = tl.where(tmp4, tmp5, tmp6) tmp8 = 
5.0 tmp9 = tl.where(tmp2, tmp8, tmp7) tmp10 = tl.full([1, 1], 0, tl.int32) tmp11 = tl.full([1, 1], 2, tl.int32) tmp12 = tmp10 == tmp11 tmp15 = tmp13 * tmp14 tmp17 = tl.where(tmp12, tmp15, tmp16) tmp18 = 0.0 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = triton_helpers.minimum(tmp19, tmp6) tmp21 = 6.0 tmp22 = tmp20 * tmp21 tmp23 = tmp9 + tmp22 tmp24 = tmp23 % tmp21 tmp25 = tmp24 != tmp10 tmp26 = libdevice.signbit(tmp24 ) if tmp24.dtype is tl.float32 else tmp24 < 0 tmp27 = libdevice.signbit(tmp21 ) if tmp21.dtype is tl.float32 else tmp21 < 0 tmp28 = tmp26 != tmp27 tmp29 = tmp25 & tmp28 tmp30 = tmp24 + tmp21 tmp31 = tl.where(tmp29, tmp30, tmp24) tmp32 = 4.0 tmp33 = tmp32 - tmp31 tmp34 = triton_helpers.minimum(tmp31, tmp33) tmp35 = triton_helpers.maximum(tmp34, tmp18) tmp36 = tmp11 == tmp11 tmp37 = tl.where(tmp36, tmp15, tmp13) tmp38 = triton_helpers.maximum(tmp37, tmp18) tmp39 = triton_helpers.minimum(tmp38, tmp6) tmp40 = tl.full([1, 1], 1, tl.int32) tmp41 = tmp40 == tmp11 tmp43 = tl.where(tmp41, tmp15, tmp42) tmp44 = triton_helpers.maximum(tmp43, tmp18) tmp45 = triton_helpers.minimum(tmp44, tmp6) tmp46 = tmp39 * tmp45 tmp47 = triton_helpers.minimum(tmp35, tmp6) tmp48 = tmp46 * tmp47 tmp49 = tmp39 - tmp48 tmp50 = tl.broadcast_to(tmp49, [XBLOCK, RBLOCK]) tmp52 = tl.where(xmask, tmp50, 0) tmp53 = tl.sum(tmp52, 1)[:, None] tmp54 = 16.0 tmp55 = tmp53 / tmp54 tmp56 = tmp49 - tmp55 tmp58 = tmp56 * tmp57 tmp59 = tmp58 + tmp55 tmp60 = triton_helpers.maximum(tmp59, tmp18) tmp61 = triton_helpers.minimum(tmp60, tmp6) tl.store(out_ptr1 + (r2 + 16 * x3), tmp61, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12, 4), (48, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_stack_0[grid(192)](arg0_1, buf0, 192, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 triton_poi_fused_index_put_lift_fresh_1[grid(192)](buf0, buf0, 192, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) triton_poi_fused_new_zeros_2[grid(4)](buf7, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf8 = torch.ops.aten.uniform.default(buf7, -4.0, 4.0) buf9 = buf8 del buf8 buf10 = buf7 del buf7 triton_poi_fused_new_ones_3[grid(4)](buf10, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf11 = torch.ops.aten.uniform.default(buf10, 0.0, 5.0) del buf10 buf12 = buf11 del buf11 buf13 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) triton_poi_fused_add_copy_div_mul_remainder_4[grid(192)](buf0, buf9, buf12, buf13, 192, XBLOCK=128, num_warps=4, num_stages=1) del buf12 buf14 = buf9 del buf9 triton_poi_fused_new_ones_3[grid(4)](buf14, 4, XBLOCK=4, num_warps= 1, num_stages=1) buf15 = torch.ops.aten.uniform.default(buf14, 0.0, 5.0) buf16 = buf15 del buf15 buf20 = reinterpret_tensor(buf14, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf14 buf21 = torch.ops.aten.uniform.default(buf20, 0.0, 5.0) del buf20 buf22 = buf21 del buf21 buf23 = reinterpret_tensor(buf0, (4, 3, 4, 4), (48, 16, 4, 1), 0) del buf0 triton_per_fused_add_clamp_copy_index_mean_minimum_mul_remainder_rsub_sub_5[ grid(12)](buf13, buf16, buf22, buf23, 12, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf13 del buf16 del buf22 return buf23, def hsv2rgb(hsv): """Convert a 4-d HSV tensor to the RGB counterpart. >>> %timeit hsv2rgb_lookup(hsv) 2.37 ms ± 13.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) >>> %timeit hsv2rgb(rgb) 298 µs ± 542 ns per loop (mean ± std. dev. 
of 7 runs, 1000 loops each) >>> torch.allclose(hsv2rgb(hsv), hsv2rgb_lookup(hsv), atol=1e-6) True References [1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative """ h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]] c = v * s n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1) k = (n + h * 6) % 6 t = torch.min(k, 4.0 - k) t = torch.clamp(t, 0, 1) return v - c * t def rgb2hsv(rgb): """Convert a 4-d RGB tensor to the HSV counterpart. Here, we compute hue using atan2() based on the definition in [1], instead of using the common lookup table approach as in [2, 3]. Those values agree when the angle is a multiple of 30°, otherwise they may differ at most ~1.2°. >>> %timeit rgb2hsv_lookup(rgb) 1.07 ms ± 2.96 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> %timeit rgb2hsv(rgb) 380 µs ± 555 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> (rgb2hsv_lookup(rgb) - rgb2hsv(rgb)).abs().max() tensor(0.0031, device='cuda:0') References [1] https://en.wikipedia.org/wiki/Hue [2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html [3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212 """ r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :] Cmax = rgb.max(1)[0] Cmin = rgb.min(1)[0] hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b) hue = hue % (2 * math.pi) / (2 * math.pi) saturate = 1 - Cmin / (Cmax + 1e-08) value = Cmax hsv = torch.stack([hue, saturate, value], dim=1) hsv[~torch.isfinite(hsv)] = 0.0 return hsv class RandomHSVFunction(Function): @staticmethod def forward(ctx, x, f_h, f_s, f_v): x = rgb2hsv(x) h = x[:, 0, :, :] h += f_h * 255.0 / 360.0 h = h % 1 x[:, 0, :, :] = h x[:, 1, :, :] = x[:, 1, :, :] * f_s x[:, 2, :, :] = x[:, 2, :, :] * f_v x = torch.clamp(x, 0, 1) x = hsv2rgb(x) return x @staticmethod def backward(ctx, grad_output): grad_input = None if ctx.needs_input_grad[0]: grad_input = grad_output.clone() return grad_input, None, None, None class ColorJitterLayerNew(nn.Module): def __init__(self, brightness, contrast, saturation, hue): super(ColorJitterLayerNew, self).__init__() self.brightness = self._check_input(brightness, 'brightness') self.contrast = self._check_input(contrast, 'contrast') self.saturation = self._check_input(saturation, 'saturation') self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5 ), clip_first_on_zero=False) def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True): if isinstance(value, numbers.Number): if value < 0: raise ValueError( 'If {} is a single number, it must be non negative.'. format(name)) value = [center - value, center + value] if clip_first_on_zero: value[0] = max(value[0], 0) elif isinstance(value, (tuple, list)) and len(value) == 2: if not bound[0] <= value[0] <= value[1] <= bound[1]: raise ValueError('{} values should be between {}'.format( name, bound)) else: raise TypeError( '{} should be a single number or a list/tuple with lenght 2.' 
.format(name)) if value[0] == value[1] == center: value = None return value def adjust_contrast(self, x): if self.contrast: factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast) means = torch.mean(x, dim=[2, 3], keepdim=True) x = (x - means) * factor + means return torch.clamp(x, 0, 1) def adjust_hsv(self, x): f_h = x.new_zeros(x.size(0), 1, 1) f_s = x.new_ones(x.size(0), 1, 1) f_v = x.new_ones(x.size(0), 1, 1) if self.hue: f_h.uniform_(*self.hue) if self.saturation: f_s = f_s.uniform_(*self.saturation) if self.brightness: f_v = f_v.uniform_(*self.brightness) return RandomHSVFunction.apply(x, f_h, f_s, f_v) def transform(self, inputs): if np.random.rand() > 0.5: transforms = [self.adjust_contrast, self.adjust_hsv] else: transforms = [self.adjust_hsv, self.adjust_contrast] for t in transforms: inputs = t(inputs) return inputs def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ParthaEth/PyTorch-StudioGAN
ColorJitterLayer
false
1896
[ "MIT" ]
0
16dd84415e4b7f4667cb1b1e0ef3fc04edf6b5a9
https://github.com/ParthaEth/PyTorch-StudioGAN/tree/16dd84415e4b7f4667cb1b1e0ef3fc04edf6b5a9
from torch.autograd import Function import math import numbers import torch import numpy as np import torch.nn as nn import torch.utils.cpp_extension def hsv2rgb(hsv): """Convert a 4-d HSV tensor to the RGB counterpart. >>> %timeit hsv2rgb_lookup(hsv) 2.37 ms ± 13.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) >>> %timeit hsv2rgb(rgb) 298 µs ± 542 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> torch.allclose(hsv2rgb(hsv), hsv2rgb_lookup(hsv), atol=1e-6) True References [1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative """ h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]] c = v * s n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1) k = (n + h * 6) % 6 t = torch.min(k, 4.0 - k) t = torch.clamp(t, 0, 1) return v - c * t def rgb2hsv(rgb): """Convert a 4-d RGB tensor to the HSV counterpart. Here, we compute hue using atan2() based on the definition in [1], instead of using the common lookup table approach as in [2, 3]. Those values agree when the angle is a multiple of 30°, otherwise they may differ at most ~1.2°. >>> %timeit rgb2hsv_lookup(rgb) 1.07 ms ± 2.96 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> %timeit rgb2hsv(rgb) 380 µs ± 555 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each) >>> (rgb2hsv_lookup(rgb) - rgb2hsv(rgb)).abs().max() tensor(0.0031, device='cuda:0') References [1] https://en.wikipedia.org/wiki/Hue [2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html [3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212 """ r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :] Cmax = rgb.max(1)[0] Cmin = rgb.min(1)[0] hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b) hue = hue % (2 * math.pi) / (2 * math.pi) saturate = 1 - Cmin / (Cmax + 1e-08) value = Cmax hsv = torch.stack([hue, saturate, value], dim=1) hsv[~torch.isfinite(hsv)] = 0.0 return hsv class RandomHSVFunction(Function): @staticmethod def forward(ctx, x, f_h, f_s, f_v): x = rgb2hsv(x) h = x[:, 0, :, :] h += f_h * 255.0 / 360.0 h = h % 1 x[:, 0, :, :] = h x[:, 1, :, :] = x[:, 1, :, :] * f_s x[:, 2, :, :] = x[:, 2, :, :] * f_v x = torch.clamp(x, 0, 1) x = hsv2rgb(x) return x @staticmethod def backward(ctx, grad_output): grad_input = None if ctx.needs_input_grad[0]: grad_input = grad_output.clone() return grad_input, None, None, None class Model(nn.Module): def __init__(self, brightness, contrast, saturation, hue): super().__init__() self.brightness = self._check_input(brightness, 'brightness') self.contrast = self._check_input(contrast, 'contrast') self.saturation = self._check_input(saturation, 'saturation') self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5 ), clip_first_on_zero=False) def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True): if isinstance(value, numbers.Number): if value < 0: raise ValueError( 'If {} is a single number, it must be non negative.'. format(name)) value = [center - value, center + value] if clip_first_on_zero: value[0] = max(value[0], 0) elif isinstance(value, (tuple, list)) and len(value) == 2: if not bound[0] <= value[0] <= value[1] <= bound[1]: raise ValueError('{} values should be between {}'.format( name, bound)) else: raise TypeError( '{} should be a single number or a list/tuple with lenght 2.' .format(name)) if value[0] == value[1] == center: # ... truncated (>4000 chars) for memory efficiency
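# A minimal round-trip sketch for the rgb2hsv/hsv2rgb pair defined in the
# ColorJitterLayer record above; the shapes, sample count, and printed
# tolerance here are assumptions, not taken from the record. Because the
# record's rgb2hsv computes hue with atan2 while hsv2rgb inverts the
# hexagonal formula, the round trip is close but not exact away from
# 30-degree hue multiples.
import math
import torch

def rgb2hsv(rgb):
    # circular hue via atan2, saturation from Cmin/Cmax, value = Cmax
    r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :]
    cmax, cmin = rgb.max(1)[0], rgb.min(1)[0]
    hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b)
    hue = hue % (2 * math.pi) / (2 * math.pi)
    hsv = torch.stack([hue, 1 - cmin / (cmax + 1e-08), cmax], dim=1)
    hsv[~torch.isfinite(hsv)] = 0.0
    return hsv

def hsv2rgb(hsv):
    # hexagonal inverse: rgb = v - c * clamp(min(k, 4 - k), 0, 1)
    h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]]
    k = (hsv.new_tensor([5.0, 3.0, 1.0]).view(3, 1, 1) + h * 6) % 6
    return v - v * s * torch.clamp(torch.min(k, 4.0 - k), 0, 1)

rgb = torch.rand(4, 3, 8, 8)
err = (hsv2rgb(rgb2hsv(rgb)) - rgb).abs().max()
print(f"max round-trip error: {err.item():.4f}")  # small, but nonzero in general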
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/2o/c2oruxioygcgwikqomugudkti2yvoczrvldtuxwnw6gttjxwwalg.py # Topologically Sorted Source Nodes: [mean, std, sub, add, sqrt, x, mul, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mean => mean # mul => mul # sqrt => sqrt_1 # std => sqrt, var # sub => sub # x => div # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-12), kwargs = {}) # %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 1e-12 tmp27 = tmp25 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = tmp11 / tmp28 tmp30 = tmp0 * tmp29 tmp32 = tmp30 + tmp31 tl.store(in_out_ptr0 + (x2), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mean, std, sub, add, sqrt, x, mul, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0.run(buf1, primals_2, primals_1, primals_3, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf1, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark 
import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class LayerNorm(nn.Module): """ layer normalization """ def __init__(self, cfg, eps=1e-12): super().__init__() self.gamma = nn.Parameter(torch.ones(cfg.hidden_dim)) self.beta = nn.Parameter(torch.zeros(cfg.hidden_dim)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) x = (x - mean) / torch.sqrt(std + self.eps) return self.gamma * x + self.beta def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(hidden_dim=4)}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 1e-12 tmp27 = tmp25 + tmp26 tmp28 = libdevice.sqrt(tmp27) tmp29 = tmp11 / tmp28 tmp30 = tmp0 * tmp29 tmp32 = tmp30 + tmp31 tl.store(in_out_ptr0 + x2, tmp32, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0[grid(256)](buf1, primals_2, primals_1, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf1, primals_1 class LayerNormNew(nn.Module): """ layer normalization """ def __init__(self, cfg, eps=1e-12): super().__init__() self.gamma = nn.Parameter(torch.ones(cfg.hidden_dim)) self.beta = nn.Parameter(torch.zeros(cfg.hidden_dim)) self.eps = eps def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
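# A small sketch, separate from the record, of the index math inside
# triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0 above: for the contiguous
# (4, 4, 4, 4) input, flat offset i resolves to x0 = i % 4 (the gamma/beta
# slot) and x1 = i // 4 (the row of four values reduced for mean and std).
# The sample offsets below are arbitrary.
for i in (0, 5, 17, 255):
    x0, x1 = i % 4, i // 4
    print(f"offset {i}: weight slot {x0}, reduction row base {4 * x1}")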
Kyumin-Park/Protein-Chemical-Releativity-BERT
LayerNorm
false
1897
[ "MIT" ]
0
6a339f4e640d99199f38a00769f5872c2a53ac55
https://github.com/Kyumin-Park/Protein-Chemical-Releativity-BERT/tree/6a339f4e640d99199f38a00769f5872c2a53ac55
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class Model(nn.Module): """ layer normalization """ def __init__(self, cfg, eps=1e-12): super().__init__() self.gamma = nn.Parameter(torch.ones(cfg.hidden_dim)) self.beta = nn.Parameter(torch.zeros(cfg.hidden_dim)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) x = (x - mean) / torch.sqrt(std + self.eps) return self.gamma * x + self.beta def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(hidden_dim=4)}]
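# A minimal CPU sketch, not taken from the record, checking that the arithmetic
# fused into the kernel above matches the eager module: note the module divides
# by sqrt(std + eps), i.e. a double square root, and the kernel reproduces that
# (libdevice.sqrt applied twice). Shapes follow get_inputs(); eps is the default.
import torch

x = torch.rand(4, 4, 4, 4)
gamma, beta, eps = torch.ones(4), torch.zeros(4), 1e-12

# eager path, as in the record's forward()
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)                  # unbiased: divides by n - 1 = 3
eager = gamma * ((x - mean) / torch.sqrt(std + eps)) + beta

# hand-rolled path mirroring the fused kernel: sum / 4, squared diffs / 3,
# sqrt twice, then scale and shift
m = x.sum(-1, keepdim=True) / 4.0
var = ((x - m) ** 2).sum(-1, keepdim=True) / 3.0
manual = gamma * ((x - m) / torch.sqrt(var.sqrt() + eps)) + beta

print(torch.allclose(eager, manual, atol=1e-6))  # expected: True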
_DynamicGates
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/5l/c5l5ekrfmgr3qnr4agpwzt4jg7o426phqlvb4imwrtbcvhsn5ct2.py # Topologically Sorted Source Nodes: [add, gates], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # gates => add_1 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_5), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 12 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = 
tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 12), (12, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 12), (12, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (12, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 12), (192, 48, 12, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [add, gates], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf2, buf1, primals_5, 768, grid=grid(768), stream=stream0) del buf1 del primals_5 return (buf2, reinterpret_tensor(primals_4, (4, 64), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class _DynamicGates(nn.Module): """Internal class to wrap the dynamic gate parameters into a dedicated PyTorch Module""" def __init__(self, cfg: 'Config', input_size: 'int'): super(_DynamicGates, self).__init__() self.cfg = cfg self.weight_ih = nn.Parameter(torch.FloatTensor(input_size, 3 * cfg .hidden_size)) self.weight_hh = nn.Parameter(torch.FloatTensor(cfg.hidden_size, 3 * cfg.hidden_size)) self.bias = nn.Parameter(torch.FloatTensor(3 * cfg.hidden_size)) self._reset_parameters() def _reset_parameters(self): """Special initialization of certain model weights.""" nn.init.orthogonal_(self.weight_ih.data) weight_hh_data = torch.eye(self.cfg.hidden_size) weight_hh_data = weight_hh_data.repeat(1, 3) self.weight_hh.data = weight_hh_data nn.init.constant_(self.bias.data, val=0) if self.cfg.initial_forget_bias is not None: self.bias.data[:self.cfg.hidden_size ] = self.cfg.initial_forget_bias def forward(self, h: 'torch.Tensor', x_d: 'torch.Tensor'): gates = h @ self.weight_hh + x_d @ self.weight_ih + self.bias return gates def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(hidden_size=4, initial_forget_bias=4), 'input_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 12 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 12), (12, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 12), (12, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (12,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 12), (192, 48, 12, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(768)](buf2, buf1, primals_5, 768, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_5 return buf2, reinterpret_tensor(primals_4, (4, 64), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) class _DynamicGatesNew(nn.Module): """Internal class to wrap the dynamic gate parameters into a dedicated PyTorch Module""" def __init__(self, cfg: 'Config', input_size: 'int'): super(_DynamicGatesNew, self).__init__() self.cfg = cfg self.weight_ih = nn.Parameter(torch.FloatTensor(input_size, 3 * cfg .hidden_size)) self.weight_hh = nn.Parameter(torch.FloatTensor(cfg.hidden_size, 3 * cfg.hidden_size)) self.bias = nn.Parameter(torch.FloatTensor(3 * cfg.hidden_size)) self._reset_parameters() def _reset_parameters(self): """Special initialization of certain model weights.""" nn.init.orthogonal_(self.weight_ih.data) weight_hh_data = torch.eye(self.cfg.hidden_size) weight_hh_data = weight_hh_data.repeat(1, 3) self.weight_hh.data = weight_hh_data nn.init.constant_(self.bias.data, val=0) if self.cfg.initial_forget_bias is not None: self.bias.data[:self.cfg.hidden_size ] = self.cfg.initial_forget_bias def forward(self, input_0, input_1): primals_1 = self.weight_ih primals_3 = self.weight_hh primals_5 = self.bias primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
NHoose/neuralhydrology
_DynamicGates
false
1898
[ "BSD-3-Clause" ]
0
f320b417fe747a923ff8ef685ad33fd8b34effad
https://github.com/NHoose/neuralhydrology/tree/f320b417fe747a923ff8ef685ad33fd8b34effad
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class Model(nn.Module): """Internal class to wrap the dynamic gate parameters into a dedicated PyTorch Module""" def __init__(self, cfg: 'Config', input_size: 'int'): super().__init__() self.cfg = cfg self.weight_ih = nn.Parameter(torch.FloatTensor(input_size, 3 * cfg .hidden_size)) self.weight_hh = nn.Parameter(torch.FloatTensor(cfg.hidden_size, 3 * cfg.hidden_size)) self.bias = nn.Parameter(torch.FloatTensor(3 * cfg.hidden_size)) self._reset_parameters() def _reset_parameters(self): """Special initialization of certain model weights.""" nn.init.orthogonal_(self.weight_ih.data) weight_hh_data = torch.eye(self.cfg.hidden_size) weight_hh_data = weight_hh_data.repeat(1, 3) self.weight_hh.data = weight_hh_data nn.init.constant_(self.bias.data, val=0) if self.cfg.initial_forget_bias is not None: self.bias.data[:self.cfg.hidden_size ] = self.cfg.initial_forget_bias def forward(self, h: 'torch.Tensor', x_d: 'torch.Tensor'): gates = h @ self.weight_hh + x_d @ self.weight_ih + self.bias return gates def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(hidden_size=4, initial_forget_bias=4), 'input_size': 4}]
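# A minimal shape sketch, not part of the record, for the gate projection that
# feeds triton_poi_fused_add_0 above: with hidden_size = 4 the two matmuls map
# (..., 4) -> (..., 12), and the 4*4*4 = 64 flattened rows times 12 gate
# columns give exactly the kernel's xnumel of 768. Random weights stand in for
# the module's initialized parameters.
import torch

hidden_size = 4
h = torch.rand(4, 4, 4, hidden_size)
x_d = torch.rand(4, 4, 4, hidden_size)
w_hh = torch.rand(hidden_size, 3 * hidden_size)
w_ih = torch.rand(hidden_size, 3 * hidden_size)
bias = torch.rand(3 * hidden_size)

gates = h @ w_hh + x_d @ w_ih + bias
print(gates.shape)    # torch.Size([4, 4, 4, 12])
print(gates.numel())  # 768, the fused kernel's xnumel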
NTM
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/73/c73dfvvwnsel4knqrcweibmutqlsocw2adin6ttrejza5e3sylp5.py # Topologically Sorted Source Nodes: [e1], Original ATen: [aten.relu] # Source node to ATen node mapping: # e1 => relu # Graph fragment: # %add_tensor_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_4, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_4,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/jz/cjzinezasvhkdapb4loejpgey7kmckbefzwmpql73yeknocwxue2.py # Topologically Sorted Source Nodes: [e1_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # e1_1 => relu_1 # Graph fragment: # %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ns/cnszijuiz432ctw37rqktvk3syr2vugzeuatmva3neoizic6f3sq.py # Topologically Sorted Source Nodes: [g1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # g1 => tanh # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_12), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor_2,), kwargs = {}) triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/e3/ce3awwui6azorvya5mylijhwq6pxpyh7wddzrdkjd5loisybpcql.py # Topologically Sorted Source Nodes: [g1_3, g1_4], Original ATen: [aten.tanh, aten.add] # Source node to ATen node mapping: # g1_3 => tanh_3 # g1_4 => add_1 # Graph fragment: # %tanh_3 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%addmm_7,), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh_3, %addmm_2), kwargs = {}) triton_poi_fused_add_tanh_3 = async_compile.triton('triton_poi_fused_add_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tmp1 + tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/iv/civdgwzpphyda4rs4fr3g6w25bprv7bn4anqgivrgzavi7xr5pdl.py # Topologically Sorted Source Nodes: [d1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # d1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_8, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_8, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/a4/ca4u6hbohfqkgchihihlu5hrf3vuqm27r2ncsg7xb6g4ikttl2at.py # Topologically Sorted Source Nodes: [d1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # d1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (500, 4), (4, 1)) assert_size_stride(primals_3, (500, ), (1, )) assert_size_stride(primals_4, (500, 500), (500, 1)) assert_size_stride(primals_5, (500, ), (1, )) assert_size_stride(primals_6, (500, 4), (4, 1)) assert_size_stride(primals_7, (4, 500), (500, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 500), (500, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4), (4, 1)) 
assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4, ), (1, )) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4, ), (1, )) assert_size_stride(primals_19, (4, 4), (4, 1)) assert_size_stride(primals_20, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 500), (500, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 500), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [e1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 32000, grid=grid(32000), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 500), (500, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (500, 500), (1, 500), 0), out=buf2) buf3 = buf2; del buf2 # reuse buf18 = empty_strided_cuda((64, 500), (500, 1), torch.bool) # Topologically Sorted Source Nodes: [e1_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf18, 32000, grid=grid(32000), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 500), (500, 1), torch.float32) # Topologically Sorted Source Nodes: [e1_1], Original ATen: [aten.relu] extern_kernels.addmm(buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 500), (1, 4), 0), alpha=1, beta=1, out=buf4) del buf3 del primals_6 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (500, 4), (1, 500), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, buf4, reinterpret_tensor(primals_9, (500, 4), (1, 500), 0), alpha=1, beta=1, out=buf6) del primals_10 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf5, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf7) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [g1], Original ATen: [aten.tanh] triton_poi_fused_tanh_2.run(buf8, primals_12, 256, grid=grid(256), stream=stream0) del primals_12 buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf8, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf9) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [g1_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_2.run(buf10, primals_14, 256, grid=grid(256), stream=stream0) del primals_14 buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf10, reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf11) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [g1_2], Original ATen: [aten.tanh] triton_poi_fused_tanh_2.run(buf12, primals_16, 256, grid=grid(256), stream=stream0) del primals_16 buf13 = 
empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_8], Original ATen: [aten.addmm] extern_kernels.addmm(primals_18, buf12, reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_18 buf14 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [g1_3, g1_4], Original ATen: [aten.tanh, aten.add] triton_poi_fused_add_tanh_3.run(buf13, buf5, buf14, 256, grid=grid(256), stream=stream0) buf15 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.addmm] extern_kernels.addmm(primals_20, buf14, reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del primals_20 buf16 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [d1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf15, buf16, 256, grid=grid(256), stream=stream0) buf17 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [d1], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf16, buf17, 256, grid=grid(256), stream=stream0) del buf16 return (buf5, buf14, buf17, buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf4, buf5, buf8, buf10, buf12, buf13, buf14, buf17, primals_19, primals_17, primals_15, primals_13, primals_11, primals_9, primals_7, buf18, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((500, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((500, 500), (500, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((500, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 500), (500, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 500), (500, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
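The pair of _softmax kernels above is the standard two-pass numerically stable softmax: the first kernel computes exp(x - rowmax), the second divides by the row sum. A minimal plain-PyTorch sketch of the same computation for reference (the helper name is illustrative, not part of the generated code):

import torch

def stable_softmax(x: torch.Tensor, dim: int = 1) -> torch.Tensor:
    # First pass mirrors triton_poi_fused__softmax_4: row max, subtract, exp.
    e = (x - x.amax(dim=dim, keepdim=True)).exp()
    # Second pass mirrors triton_poi_fused__softmax_5: row sum, divide.
    return e / e.sum(dim=dim, keepdim=True)

x = torch.randn(64, 4)
assert torch.allclose(stable_softmax(x), torch.softmax(x, dim=1), atol=1e-6)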
from _paritybench_helpers import _mock_config import logging import torch import numpy as np from torch.nn import functional as F import torch.multiprocessing from torch import nn import torch.utils.data class NTM(nn.Module): def __init__(self, opt, hidden_dim=500, l1_strength=0.001): super(NTM, self).__init__() self.input_dim = opt.bow_vocab_size self.topic_num = opt.topic_num topic_num = opt.topic_num self.fc11 = nn.Linear(self.input_dim, hidden_dim) self.fc12 = nn.Linear(hidden_dim, hidden_dim) self.fc21 = nn.Linear(hidden_dim, topic_num) self.fc22 = nn.Linear(hidden_dim, topic_num) self.fcs = nn.Linear(self.input_dim, hidden_dim, bias=False) self.fcg1 = nn.Linear(topic_num, topic_num) self.fcg2 = nn.Linear(topic_num, topic_num) self.fcg3 = nn.Linear(topic_num, topic_num) self.fcg4 = nn.Linear(topic_num, topic_num) self.fcd1 = nn.Linear(topic_num, self.input_dim) self.l1_strength = torch.FloatTensor([l1_strength]) def encode(self, x): e1 = F.relu(self.fc11(x)) e1 = F.relu(self.fc12(e1)) e1 = e1.add(self.fcs(x)) return self.fc21(e1), self.fc22(e1) def reparameterize(self, mu, logvar): if self.training: std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) else: return mu def generate(self, h): g1 = torch.tanh(self.fcg1(h)) g1 = torch.tanh(self.fcg2(g1)) g1 = torch.tanh(self.fcg3(g1)) g1 = torch.tanh(self.fcg4(g1)) g1 = g1.add(h) return g1 def decode(self, z): d1 = F.softmax(self.fcd1(z), dim=1) return d1 def forward(self, x): mu, logvar = self.encode(x.view(-1, self.input_dim)) z = self.reparameterize(mu, logvar) g = self.generate(z) return z, g, self.decode(g), mu, logvar def print_topic_words(self, vocab_dic, fn, n_top_words=10): beta_exp = self.fcd1.weight.data.cpu().numpy().T logging.info('Writing to %s' % fn) fw = open(fn, 'w') for k, beta_k in enumerate(beta_exp): topic_words = [vocab_dic[w_id] for w_id in np.argsort(beta_k)[: -n_top_words - 1:-1]] None fw.write('{}\n'.format(' '.join(topic_words))) fw.close() def get_topic_words(self): return self.fcd1.weight.T def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(bow_vocab_size=4, topic_num=4)}]
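NTM.reparameterize above is the usual VAE reparameterization trick; note that the compiled graph below contains no torch.randn_like, i.e. it was traced through the eval branch that returns mu directly. A minimal sketch of the training-mode sampling step, with illustrative shapes:

import torch

mu = torch.zeros(64, 4)          # (batch, topic_num), illustrative sizes
logvar = torch.zeros(64, 4)
std = torch.exp(0.5 * logvar)    # logvar stores log(sigma^2), so std = exp(logvar / 2)
eps = torch.randn_like(std)      # eps ~ N(0, I), resampled on every call
z = eps.mul(std).add_(mu)        # z = mu + sigma * eps, differentiable w.r.t. mu and logvar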
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import logging import numpy as np from torch.nn import functional as F import torch.multiprocessing from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_add_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tmp1 + tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] 
xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (500, 4), (4, 1)) assert_size_stride(primals_3, (500,), (1,)) assert_size_stride(primals_4, (500, 500), (500, 1)) assert_size_stride(primals_5, (500,), (1,)) assert_size_stride(primals_6, (500, 4), (4, 1)) assert_size_stride(primals_7, (4, 500), (500, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 500), (500, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4, 4), (4, 1)) assert_size_stride(primals_20, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 500), (500, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 500), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(32000)](buf1, primals_3, 32000, XBLOCK =256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 500), (500, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (500, 500), ( 1, 500), 0), out=buf2) buf3 = buf2 del buf2 buf18 = empty_strided_cuda((64, 500), (500, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(32000)](buf3, primals_5, buf18, 32000, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 500), (500, 1), torch.float32) extern_kernels.addmm(buf3, reinterpret_tensor(primals_1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 500), (1, 4), 0), alpha=1, beta=1, out=buf4) del buf3 del primals_6 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (500, 4), (1, 500), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_10, buf4, reinterpret_tensor(primals_9, (500, 4), (1, 500), 0), alpha=1, beta=1, out=buf6) del primals_10 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf7) buf8 = buf7 del buf7 triton_poi_fused_tanh_2[grid(256)](buf8, primals_12, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_12 buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf9) buf10 = buf9 
del buf9 triton_poi_fused_tanh_2[grid(256)](buf10, primals_14, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_14 buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_tanh_2[grid(256)](buf12, primals_16, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_16 buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_18, buf12, reinterpret_tensor( primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_18 buf14 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_add_tanh_3[grid(256)](buf13, buf5, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) buf15 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_20, buf14, reinterpret_tensor( primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del primals_20 buf16 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf15, buf16, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf17 = buf15 del buf15 triton_poi_fused__softmax_5[grid(256)](buf16, buf17, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf16 return (buf5, buf14, buf17, buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf4, buf5, buf8, buf10, buf12, buf13, buf14, buf17, primals_19, primals_17, primals_15, primals_13, primals_11, primals_9, primals_7, buf18, primals_4) class NTMNew(nn.Module): def __init__(self, opt, hidden_dim=500, l1_strength=0.001): super(NTMNew, self).__init__() self.input_dim = opt.bow_vocab_size self.topic_num = opt.topic_num topic_num = opt.topic_num self.fc11 = nn.Linear(self.input_dim, hidden_dim) self.fc12 = nn.Linear(hidden_dim, hidden_dim) self.fc21 = nn.Linear(hidden_dim, topic_num) self.fc22 = nn.Linear(hidden_dim, topic_num) self.fcs = nn.Linear(self.input_dim, hidden_dim, bias=False) self.fcg1 = nn.Linear(topic_num, topic_num) self.fcg2 = nn.Linear(topic_num, topic_num) self.fcg3 = nn.Linear(topic_num, topic_num) self.fcg4 = nn.Linear(topic_num, topic_num) self.fcd1 = nn.Linear(topic_num, self.input_dim) self.l1_strength = torch.FloatTensor([l1_strength]) def encode(self, x): e1 = F.relu(self.fc11(x)) e1 = F.relu(self.fc12(e1)) e1 = e1.add(self.fcs(x)) return self.fc21(e1), self.fc22(e1) def reparameterize(self, mu, logvar): if self.training: std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) else: return mu def generate(self, h): g1 = torch.tanh(self.fcg1(h)) g1 = torch.tanh(self.fcg2(g1)) g1 = torch.tanh(self.fcg3(g1)) g1 = torch.tanh(self.fcg4(g1)) g1 = g1.add(h) return g1 def decode(self, z): d1 = F.softmax(self.fcd1(z), dim=1) return d1 def print_topic_words(self, vocab_dic, fn, n_top_words=10): beta_exp = self.fcd1.weight.data.cpu().numpy().T logging.info('Writing to %s' % fn) fw = open(fn, 'w') for k, beta_k in enumerate(beta_exp): topic_words = [vocab_dic[w_id] for w_id in np.argsort(beta_k)[: -n_top_words - 1:-1]] None fw.write('{}\n'.format(' '.join(topic_words))) fw.close() def get_topic_words(self): return self.fcd1.weight.T def forward(self, input_0): primals_2 = self.fc11.weight primals_3 = self.fc11.bias primals_4 = self.fc12.weight primals_5 = self.fc12.bias primals_7 = self.fc21.weight primals_8 = self.fc21.bias primals_9 = self.fc22.weight primals_10 = self.fc22.bias primals_6 = self.fcs.weight primals_11 = self.fcg1.weight primals_12 = self.fcg1.bias primals_13 = self.fcg2.weight primals_14 = self.fcg2.bias 
primals_15 = self.fcg3.weight primals_16 = self.fcg3.bias primals_17 = self.fcg4.weight primals_18 = self.fcg4.bias primals_19 = self.fcd1.weight primals_20 = self.fcd1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return output[0], output[1], output[2], output[3], output[4]
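A sketch of how one might check that NTMNew reproduces the eager NTM, assuming both classes from this record are importable side by side and a CUDA device is available; in eval mode reparameterize reduces to the identity on mu, so all five outputs should agree:

import torch
from _paritybench_helpers import _mock_config  # same helper the snippets above rely on

opt = _mock_config(bow_vocab_size=4, topic_num=4)
eager = NTM(opt).eval().cuda()          # classes assumed importable from this record
compiled = NTMNew(opt).eval().cuda()
compiled.load_state_dict(eager.state_dict())   # parameter names match one-to-one
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    for ref, out in zip(eager(x), compiled(x)):
        assert torch.allclose(ref, out, atol=1e-5)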
WuDiDaBinGe/TAKG
NTM
false
1899
[ "MIT" ]
0
83e608e677a4ee74722d18cb5ef430f4f6c6ad31
https://github.com/WuDiDaBinGe/TAKG/tree/83e608e677a4ee74722d18cb5ef430f4f6c6ad31
from _paritybench_helpers import _mock_config import logging import torch import numpy as np from torch.nn import functional as F import torch.multiprocessing from torch import nn import torch.utils.data class Model(nn.Module): def __init__(self, opt, hidden_dim=500, l1_strength=0.001): super().__init__() self.input_dim = opt.bow_vocab_size self.topic_num = opt.topic_num topic_num = opt.topic_num self.fc11 = nn.Linear(self.input_dim, hidden_dim) self.fc12 = nn.Linear(hidden_dim, hidden_dim) self.fc21 = nn.Linear(hidden_dim, topic_num) self.fc22 = nn.Linear(hidden_dim, topic_num) self.fcs = nn.Linear(self.input_dim, hidden_dim, bias=False) self.fcg1 = nn.Linear(topic_num, topic_num) self.fcg2 = nn.Linear(topic_num, topic_num) self.fcg3 = nn.Linear(topic_num, topic_num) self.fcg4 = nn.Linear(topic_num, topic_num) self.fcd1 = nn.Linear(topic_num, self.input_dim) self.l1_strength = torch.FloatTensor([l1_strength]) def encode(self, x): e1 = F.relu(self.fc11(x)) e1 = F.relu(self.fc12(e1)) e1 = e1.add(self.fcs(x)) return self.fc21(e1), self.fc22(e1) def reparameterize(self, mu, logvar): if self.training: std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) else: return mu def generate(self, h): g1 = torch.tanh(self.fcg1(h)) g1 = torch.tanh(self.fcg2(g1)) g1 = torch.tanh(self.fcg3(g1)) g1 = torch.tanh(self.fcg4(g1)) g1 = g1.add(h) return g1 def decode(self, z): d1 = F.softmax(self.fcd1(z), dim=1) return d1 def forward(self, x): mu, logvar = self.encode(x.view(-1, self.input_dim)) z = self.reparameterize(mu, logvar) g = self.generate(z) return z, g, self.decode(g), mu, logvar def print_topic_words(self, vocab_dic, fn, n_top_words=10): beta_exp = self.fcd1.weight.data.cpu().numpy().T logging.info('Writing to %s' % fn) fw = open(fn, 'w') for k, beta_k in enumerate(beta_exp): topic_words = [vocab_dic[w_id] for w_id in np.argsort(beta_k)[: -n_top_words - 1:-1]] None fw.write('{}\n'.format(' '.join(topic_words))) fw.close() def get_topic_words(self): return self.fcd1.weight.T def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
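The encoder above flattens the input to (batch, bow_vocab_size) and adds a bias-free linear shortcut (fcs) around the two ReLU layers. A minimal sketch of that dataflow with illustrative sizes:

import torch
from torch import nn
import torch.nn.functional as F

input_dim, hidden_dim = 4, 500                        # illustrative sizes
fc11 = nn.Linear(input_dim, hidden_dim)
fc12 = nn.Linear(hidden_dim, hidden_dim)
fcs = nn.Linear(input_dim, hidden_dim, bias=False)    # the shortcut path
x = torch.rand(4, 4, 4, 4).view(-1, input_dim)        # (64, 4), as in Model.forward
e1 = F.relu(fc12(F.relu(fc11(x)))).add(fcs(x))        # same dataflow as Model.encode
print(e1.shape)  # torch.Size([64, 500])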
BertSelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {}) # %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ek/cekc4xnuyislvdovnzf5y3lkc2xvyqm5n6o243mths7wzeuvqbod.py # Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul] # Source node to ATen node mapping: # attention_mask => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %unsqueeze), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -10000.0), kwargs = {}) # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %mul), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {}) triton_poi_fused_mul_rsub_1 = async_compile.triton('triton_poi_fused_mul_rsub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_rsub_1(in_ptr0, 
in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = -10000.0 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp4 tmp11 = tmp7 + tmp10 tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp15 = tmp2 - tmp14 tmp16 = tmp15 * tmp4 tmp17 = tmp13 + tmp16 tmp18 = triton_helpers.maximum(tmp12, tmp17) tmp21 = tmp2 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tmp19 + tmp22 tmp24 = triton_helpers.maximum(tmp18, tmp23) tmp25 = tmp6 - tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = tmp11 - tmp24 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tmp30 = tmp17 - tmp24 tmp31 = tl_math.exp(tmp30) tmp32 = tmp29 + tmp31 tmp33 = tmp23 - tmp24 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tmp36 = float("-inf") tmp37 = tmp6 == tmp36 tmp38 = tmp37 == 0 tmp39 = tmp38.to(tl.int64) tmp40 = (tmp39 != 0) tmp41 = tmp11 == tmp36 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = (tmp43 != 0) tmp45 = tmp40 | tmp44 tmp46 = tmp17 == tmp36 tmp47 = tmp46 == 0 tmp48 = tmp47.to(tl.int64) tmp49 = (tmp48 != 0) tmp50 = tmp45 | tmp49 tmp51 = tmp23 == tmp36 tmp52 = tmp51 == 0 tmp53 = tmp52.to(tl.int64) tmp54 = (tmp53 != 0) tmp55 = tmp50 | tmp54 tl.store(out_ptr0 + (x3), tmp24, xmask) tl.store(out_ptr1 + (x3), tmp35, xmask) tl.store(out_ptr2 + (x3), tmp55, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/rs/crssvp4cfqnmhgd7rc7jzgyvj2wsdpbpk6qivlfh3twgsgwopsiy.py # Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul] # Source node to ATen node mapping: # attention_mask => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %unsqueeze), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -10000.0), kwargs = {}) # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %mul), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: 
torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {}) triton_poi_fused_mul_rsub_2 = async_compile.triton('triton_poi_fused_mul_rsub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = (xindex // 4) x5 = xindex x3 = (xindex // 64) x6 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last').to(tl.int1) tmp2 = tl.load(in_out_ptr0 + (x5), xmask) tmp3 = tl.load(in_ptr1 + (x6 + (16*x3)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = -10000.0 tmp7 = tmp5 * tmp6 tmp8 = tmp2 + tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp13 = tmp11 / tmp12 tmp14 = 0.0 tmp15 = tl.where(tmp1, tmp14, tmp13) tl.store(in_out_ptr0 + (x5), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], 
tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # context_layer_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2) del primals_8 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(buf1, primals_6, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_6 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) # Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul] triton_poi_fused_mul_rsub_1.run(buf5, primals_1, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul] 
triton_poi_fused_mul_rsub_2.run(buf9, buf8, primals_1, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf8 del primals_1 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(buf2, primals_9, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del primals_9 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) del buf11 return (reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
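triton_poi_fused_mul_rsub_1/2 above fuse the additive mask, the numerically stable softmax, and a guard that zeroes any row whose scores are all -inf (the eq/logical_not/any/where chain). With the -10000.0 soft mask the guard can never actually fire, but the compiled graph keeps it defensively. An equivalent plain-PyTorch sketch with illustrative shapes:

import torch

scores = torch.randn(4, 4, 4, 4)                   # (N, heads, Lq, L)
mask = torch.randint(0, 2, (4, 4, 4)).float()      # (N, Lq, L), 1 = attend
masked = scores + (1 - mask.unsqueeze(1)) * -10000.0
e = (masked - masked.amax(dim=-1, keepdim=True)).exp()
probs = e / e.sum(dim=-1, keepdim=True)
# Guard: a row that is entirely -inf would yield NaNs; replace it with zeros.
dead = ~(masked != float('-inf')).any(dim=-1, keepdim=True)
probs = torch.where(dead, torch.zeros_like(probs), probs)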
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, query_states, key_states, value_states, attention_mask): """ Args: query_states: (N, Lq, D) key_states: (N, L, D) value_states: (N, L, D) attention_mask: (N, Lq, L) Returns: """ attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0 mixed_query_layer = self.query(query_states) mixed_key_layer = self.key(key_states) mixed_value_layer = self.value(value_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5)}]
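transpose_for_scores above splits hidden_size into (num_attention_heads, attention_head_size) and moves the head axis in front of the sequence axis. A minimal sketch with the mock config's sizes (hidden_size=4, four heads, head size 1):

import torch

N, L, heads, head_size = 4, 4, 4, 1
x = torch.rand(N, L, heads * head_size)                  # (N, L, D)
y = x.view(N, L, heads, head_size).permute(0, 2, 1, 3)
print(y.shape)  # torch.Size([4, 4, 4, 1]) -- (N, heads, L, head_size)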
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = -10000.0 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp4 tmp11 = tmp7 + tmp10 tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp15 = tmp2 - tmp14 tmp16 = tmp15 * tmp4 tmp17 = tmp13 + tmp16 tmp18 = triton_helpers.maximum(tmp12, tmp17) tmp21 = tmp2 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tmp19 + tmp22 tmp24 = triton_helpers.maximum(tmp18, tmp23) tmp25 = tmp6 - tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = tmp11 - tmp24 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tmp30 = tmp17 - tmp24 tmp31 = tl_math.exp(tmp30) tmp32 = tmp29 + tmp31 tmp33 = tmp23 - tmp24 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tmp36 = float('-inf') tmp37 = tmp6 == tmp36 tmp38 = tmp37 == 0 tmp39 = tmp38.to(tl.int64) tmp40 = tmp39 != 0 tmp41 = tmp11 == tmp36 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tmp46 = tmp17 == tmp36 tmp47 = tmp46 == 0 tmp48 = tmp47.to(tl.int64) tmp49 = tmp48 != 0 tmp50 = tmp45 | tmp49 tmp51 = tmp23 == tmp36 tmp52 = tmp51 == 0 tmp53 = tmp52.to(tl.int64) tmp54 = tmp53 != 0 tmp55 = tmp50 | tmp54 tl.store(out_ptr0 + x3, tmp24, xmask) tl.store(out_ptr1 + x3, tmp35, xmask) tl.store(out_ptr2 + x3, tmp55, xmask) @triton.jit def triton_poi_fused_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x5 = xindex x3 = xindex // 64 x6 = xindex % 16 tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_out_ptr0 + x5, xmask) tmp3 = tl.load(in_ptr1 + (x6 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = -10000.0 tmp7 = tmp5 * tmp6 tmp8 = tmp2 + tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp13 = tmp11 / tmp12 tmp14 = 0.0 tmp15 = tl.where(tmp1, tmp14, tmp13) tl.store(in_out_ptr0 + x5, tmp15, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2) del primals_8 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del 
buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_6, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_6 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) triton_poi_fused_mul_rsub_1[grid(64)](buf5, primals_1, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_mul_rsub_2[grid(256)](buf9, buf8, primals_1, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_1 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf2, primals_9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_9 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf11 return reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_7, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0 ), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertSelfAttentionNew(nn.Module): def __init__(self, config): super(BertSelfAttentionNew, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.query.weight primals_3 = self.query.bias primals_5 = self.key.weight primals_6 = self.key.bias primals_8 = self.value.weight primals_9 = self.value.bias primals_1 = input_0 primals_4 = input_1 primals_7 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
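One subtlety of the compiled wrapper above: input_0 maps to primals_1, which the kernels consume as the attention mask (it is the first tensor the eager forward touches), while input_1..input_3 carry the query, key and value states. A parity check against the eager module therefore has to permute the arguments; a sketch assuming both classes from this record are importable and noting that dropout is inert in eval mode:

import torch
from _paritybench_helpers import _mock_config  # same helper the snippets above rely on

cfg = _mock_config(hidden_size=4, num_attention_heads=4,
                   attention_probs_dropout_prob=0.5)
eager = BertSelfAttention(cfg).eval().cuda()
compiled = BertSelfAttentionNew(cfg).eval().cuda()
compiled.load_state_dict(eager.state_dict())
q, k, v, m = (torch.rand(4, 4, 4, device='cuda') for _ in range(4))
with torch.no_grad():
    assert torch.allclose(eager(q, k, v, m), compiled(m, q, k, v), atol=1e-5)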
RoshanTanisha/TVCaption
BertSelfAttention
false
1900
[ "MIT" ]
0
8b14a340134ec69ed87426ee1f0e93e53f6456e5
https://github.com/RoshanTanisha/TVCaption/tree/8b14a340134ec69ed87426ee1f0e93e53f6456e5
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class Model(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, query_states, key_states, value_states, attention_mask): """ Args: query_states: (N, Lq, D) key_states: (N, L, D) value_states: (N, L, D) attention_mask: (N, Lq, L) Returns: """ attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0 mixed_query_layer = self.query(query_states) mixed_key_layer = self.key(key_states) mixed_value_layer = self.value(value_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5)}]
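# --- Hedged shape-bookkeeping sketch (editor addition) ---
# Spells out what transpose_for_scores in the eager Model above does, since the
# compiled call() expresses the same head split purely as reinterpret_tensor
# stride tricks: (N, L, D) -> (N, num_heads, L, head_size).
import torch

N, L, D, H = 4, 4, 4, 4      # mirrors get_init_inputs(); head_size = D // H = 1
x = torch.rand(N, L, D)
heads = x.view(N, L, H, D // H).permute(0, 2, 1, 3)
assert heads.shape == (N, H, L, D // H)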
BertAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {}) # %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ek/cekc4xnuyislvdovnzf5y3lkc2xvyqm5n6o243mths7wzeuvqbod.py # Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul] # Source node to ATen node mapping: # attention_mask => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %unsqueeze), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -10000.0), kwargs = {}) # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %mul), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {}) triton_poi_fused_mul_rsub_1 = async_compile.triton('triton_poi_fused_mul_rsub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_rsub_1(in_ptr0, 
in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = -10000.0 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp4 tmp11 = tmp7 + tmp10 tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp15 = tmp2 - tmp14 tmp16 = tmp15 * tmp4 tmp17 = tmp13 + tmp16 tmp18 = triton_helpers.maximum(tmp12, tmp17) tmp21 = tmp2 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tmp19 + tmp22 tmp24 = triton_helpers.maximum(tmp18, tmp23) tmp25 = tmp6 - tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = tmp11 - tmp24 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tmp30 = tmp17 - tmp24 tmp31 = tl_math.exp(tmp30) tmp32 = tmp29 + tmp31 tmp33 = tmp23 - tmp24 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tmp36 = float("-inf") tmp37 = tmp6 == tmp36 tmp38 = tmp37 == 0 tmp39 = tmp38.to(tl.int64) tmp40 = (tmp39 != 0) tmp41 = tmp11 == tmp36 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = (tmp43 != 0) tmp45 = tmp40 | tmp44 tmp46 = tmp17 == tmp36 tmp47 = tmp46 == 0 tmp48 = tmp47.to(tl.int64) tmp49 = (tmp48 != 0) tmp50 = tmp45 | tmp49 tmp51 = tmp23 == tmp36 tmp52 = tmp51 == 0 tmp53 = tmp52.to(tl.int64) tmp54 = (tmp53 != 0) tmp55 = tmp50 | tmp54 tl.store(out_ptr0 + (x3), tmp24, xmask) tl.store(out_ptr1 + (x3), tmp35, xmask) tl.store(out_ptr2 + (x3), tmp55, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/rs/crssvp4cfqnmhgd7rc7jzgyvj2wsdpbpk6qivlfh3twgsgwopsiy.py # Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul] # Source node to ATen node mapping: # attention_mask => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %unsqueeze), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -10000.0), kwargs = {}) # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %mul), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: 
torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {}) triton_poi_fused_mul_rsub_2 = async_compile.triton('triton_poi_fused_mul_rsub_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_rsub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = (xindex // 4) x5 = xindex x3 = (xindex // 64) x6 = xindex % 16 tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last').to(tl.int1) tmp2 = tl.load(in_out_ptr0 + (x5), xmask) tmp3 = tl.load(in_ptr1 + (x6 + (16*x3)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = -10000.0 tmp7 = tmp5 * tmp6 tmp8 = tmp2 + tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp13 = tmp11 / tmp12 tmp14 = 0.0 tmp15 = tl.where(tmp1, tmp14, tmp13) tl.store(in_out_ptr0 + (x5), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], 
tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # context_layer_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/hk/chkirlrxzb52fxbrq2rynamgt7aligt77yn6j6ihfk46whjvd374.py # Topologically Sorted Source Nodes: [add_1, u, sub_1, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow] # Source node to ATen node mapping: # add_1 => add_1 # pow_1 => pow_1 # s => mean_1 # sub_1 => sub_2 # u => mean # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {}) # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) triton_poi_fused_add_mean_pow_sub_5 = async_compile.triton('triton_poi_fused_add_mean_pow_sub_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), 
xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ci/ccia6uv74fhe5tntfhh5dckf3prhv4m42isfaz2ejda3c5cc4tka.py # Topologically Sorted Source Nodes: [add_1, u, sub_1, add_2, sqrt, x_3, mul_1, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add_1 => add_1 # add_2 => add_2 # hidden_states_2 => add_3 # mul_1 => mul_1 # sqrt => sqrt # sub_1 => sub_2 # u => mean # x_3 => div_2 # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_4), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {}) # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %sqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %div_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {}) triton_poi_fused_add_div_mean_mul_sqrt_sub_6 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x2), xmask) tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(buf1, primals_6, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_6 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] 
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) # Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul] triton_poi_fused_mul_rsub_1.run(buf5, primals_1, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [sub, attention_mask], Original ATen: [aten.rsub, aten.mul] triton_poi_fused_mul_rsub_2.run(buf9, buf8, primals_1, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf8 del primals_1 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(buf2, primals_8, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del primals_8 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add_1, u, sub_1, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow] triton_poi_fused_add_mean_pow_sub_5.run(buf13, primals_4, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_1, u, sub_1, add_2, sqrt, x_3, mul_1, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mean_mul_sqrt_sub_6.run(primals_11, buf13, primals_4, buf14, buf15, primals_12, buf16, 64, grid=grid(64), stream=stream0) del buf14 del buf15 del primals_12 return (buf16, primals_4, primals_11, buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', 
dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
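# --- Hedged timing sketch (editor addition) ---
# benchmark_compiled_module() above delegates to inductor's print_performance;
# the same graph can be timed by hand with CUDA events. time_call() is a
# hypothetical helper: pass it e.g. lambda: call([primals_1, ..., primals_12])
# built from fresh tensors, since call() empties the argument list it receives.
import torch

def time_call(fn, iters=100, warmup=10):
    for _ in range(warmup):   # let lazy compilation/autotuning settle first
        fn()
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    torch.cuda.synchronize()
    start.record()
    for _ in range(iters):
        fn()
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end) / iters   # mean milliseconds per invocation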
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, query_states, key_states, value_states, attention_mask): """ Args: query_states: (N, Lq, D) key_states: (N, L, D) value_states: (N, L, D) attention_mask: (N, Lq, L) Returns: """ attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0 mixed_query_layer = self.query(query_states) mixed_key_layer = self.key(key_states) mixed_value_layer = self.value(value_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config. 
layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): """ Args: input_tensor: (N, L, D) attention_mask: (N, Lq, L) Returns: """ self_output = self.self(input_tensor, input_tensor, input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, layer_norm_eps=1, hidden_dropout_prob=0.5)}]
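# --- Hedged reference sketch (editor addition) ---
# Plain-PyTorch mirror of what triton_poi_fused_mul_rsub_1/_2 in this row fuse:
# additive masking via (1 - mask) * -10000, a max-subtracted softmax, and a
# guard that zeroes any row whose logits are all -inf (a fully masked row).
import torch

def masked_softmax_reference(scores, mask):
    # scores: (N, H, Lq, L) raw attention logits; mask: (N, Lq, L), 1 = attend
    logits = scores + (1.0 - mask.unsqueeze(1)) * -10000.0
    amax = logits.amax(dim=-1, keepdim=True)   # subtract row max for stability
    probs = torch.exp(logits - amax)
    probs = probs / probs.sum(dim=-1, keepdim=True)
    all_masked = (logits == float('-inf')).all(dim=-1, keepdim=True)
    return torch.where(all_masked, torch.zeros_like(probs), probs)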
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_mul_rsub_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy ='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = -10000.0 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp4 tmp11 = tmp7 + tmp10 tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp15 = tmp2 - tmp14 tmp16 = tmp15 * tmp4 tmp17 = tmp13 + tmp16 tmp18 = triton_helpers.maximum(tmp12, tmp17) tmp21 = tmp2 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tmp19 + tmp22 tmp24 = triton_helpers.maximum(tmp18, tmp23) tmp25 = tmp6 - tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = tmp11 - tmp24 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tmp30 = tmp17 - tmp24 tmp31 = tl_math.exp(tmp30) tmp32 = tmp29 + tmp31 tmp33 = tmp23 - tmp24 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tmp36 = float('-inf') tmp37 = tmp6 == tmp36 tmp38 = tmp37 == 0 tmp39 = tmp38.to(tl.int64) tmp40 = tmp39 != 0 tmp41 = tmp11 == tmp36 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tmp46 = tmp17 == tmp36 tmp47 = tmp46 == 0 tmp48 = tmp47.to(tl.int64) tmp49 = tmp48 != 0 tmp50 = tmp45 | tmp49 tmp51 = tmp23 == tmp36 tmp52 = tmp51 == 0 tmp53 = tmp52.to(tl.int64) tmp54 = tmp53 != 0 tmp55 = tmp50 | tmp54 tl.store(out_ptr0 + x3, tmp24, xmask) tl.store(out_ptr1 + x3, tmp35, xmask) tl.store(out_ptr2 + x3, tmp55, xmask) @triton.jit def triton_poi_fused_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x5 = xindex x3 = xindex // 64 x6 = xindex % 16 tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_out_ptr0 + x5, xmask) tmp3 = tl.load(in_ptr1 + (x6 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = -10000.0 tmp7 = tmp5 * tmp6 tmp8 = tmp2 + tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.exp(tmp10) tmp13 = tmp11 / tmp12 tmp14 = 0.0 tmp15 = tl.where(tmp1, tmp14, tmp13) tl.store(in_out_ptr0 + x5, tmp15, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_6, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_6 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) triton_poi_fused_mul_rsub_1[grid(64)](buf5, primals_1, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_mul_rsub_2[grid(256)](buf9, buf8, primals_1, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_1 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf2, primals_8, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del primals_8 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), 
reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_mean_pow_sub_5[grid(16)](buf13, primals_4, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_sqrt_sub_6[grid(64)](primals_11, buf13, primals_4, buf14, buf15, primals_12, buf16, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf14 del buf15 del primals_12 return buf16, primals_4, primals_11, buf9, reinterpret_tensor(buf10, ( 16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0 ), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9 class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, query_states, key_states, value_states, attention_mask): """ Args: query_states: (N, Lq, D) key_states: (N, L, D) value_states: (N, L, D) attention_mask: (N, Lq, L) Returns: """ attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0 mixed_query_layer = self.query(query_states) mixed_key_layer = self.key(key_states) mixed_value_layer = self.value(value_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. 
attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttentionNew(nn.Module): def __init__(self, config): super(BertAttentionNew, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_0, input_1): primals_2 = self.self.query.weight primals_3 = self.self.query.bias primals_5 = self.self.key.weight primals_6 = self.self.key.bias primals_7 = self.self.value.weight primals_8 = self.self.value.bias primals_9 = self.output.dense.weight primals_10 = self.output.dense.bias primals_11 = self.output.LayerNorm.weight primals_12 = self.output.LayerNorm.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
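# --- Hedged usage sketch (editor addition) ---
# Runs the compiled wrapper above. Assumes a CUDA device; SimpleNamespace
# stands in for _mock_config with the fields this row's get_init_inputs() uses.
import torch
from types import SimpleNamespace

config = SimpleNamespace(hidden_size=4, num_attention_heads=4,
                         attention_probs_dropout_prob=0.5,
                         layer_norm_eps=1, hidden_dropout_prob=0.5)
attn = BertAttentionNew(config).cuda()
x = torch.rand(4, 4, 4, device='cuda')      # (N, L, D) hidden states
mask = torch.ones(4, 4, 4, device='cuda')   # (N, Lq, L), 1 = attend
out = attn(mask, x)  # judging from call(), input_0 becomes primals_1 (the mask)
                     # and input_1 becomes primals_4 (the hidden states)
assert out.shape == (4, 4, 4)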
RoshanTanisha/TVCaption
BertAttention
false
1901
[ "MIT" ]
0
8b14a340134ec69ed87426ee1f0e93e53f6456e5
https://github.com/RoshanTanisha/TVCaption/tree/8b14a340134ec69ed87426ee1f0e93e53f6456e5
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, query_states, key_states, value_states, attention_mask): """ Args: query_states: (N, Lq, D) key_states: (N, L, D) value_states: (N, L, D) attention_mask: (N, Lq, L) Returns: """ attention_mask = (1 - attention_mask.unsqueeze(1)) * -10000.0 mixed_query_layer = self.query(query_states) mixed_key_layer = self.key(key_states) mixed_value_layer = self.value(value_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config. layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class Model(nn.Module): def __init__(self, config): super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_ma # ... truncated (>4000 chars) for memory efficiency
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/v7/cv7humnywkkqhrumbeetegqlkretdwtkj5pcanrbgxrolupvobzt.py # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, h], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # add_1 => add_1 # h => mul_3 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_1 => pow_1 # tanh => tanh # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7978845608028654), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {}) triton_poi_fused_add_mul_pow_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_0', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp0 * tmp0 tmp4 = tmp3 * tmp0 tmp5 = 0.044715 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tmp8 = 0.7978845608028654 tmp9 = tmp7 * tmp8 tmp10 = libdevice.tanh(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp2 * tmp12 tl.store(out_ptr0 + (x0), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, h], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_pow_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_5, alpha=1, beta=1, out=buf2) del primals_4 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
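Both extern_kernels.addmm calls in the dump above are semantically plain torch.addmm, i.e. out = beta * input + alpha * (mat1 @ mat2). A quick eager-mode check using the shapes from this record (a sketch, not from the repo):

import torch

bias = torch.randn(4)      # primals_2
x2d = torch.randn(64, 4)   # primals_1 flattened to (64, 4)
w = torch.randn(4, 4)      # primals_3
out = torch.addmm(bias, x2d, w, alpha=1, beta=1)
assert torch.allclose(out, bias + x2d @ w)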
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter


def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
        0.044715 * torch.pow(x, 3))))


class Conv1D(nn.Module):

    def __init__(self, nf, nx):
        super(Conv1D, self).__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = Parameter(w)
        self.bias = Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x


class MLP(nn.Module):

    def __init__(self, n_state, config):
        super(MLP, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = gelu

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return h2


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_state': 4, 'config': _mock_config(n_embd=4)}]
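The gelu above is the tanh approximation used in GPT-2. A small check of how closely it tracks the exact erf-based GELU (a sketch, not from the repo):

import math
import torch

x = torch.linspace(-5, 5, steps=101)
approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x ** 3)))
exact = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
print((approx - exact).abs().max())  # small; on the order of 1e-3 or below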
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_pow_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = tmp0 * tmp0
    tmp4 = tmp3 * tmp0
    tmp5 = 0.044715
    tmp6 = tmp4 * tmp5
    tmp7 = tmp0 + tmp6
    tmp8 = 0.7978845608028654
    tmp9 = tmp7 * tmp8
    tmp10 = libdevice.tanh(tmp9)
    tmp11 = 1.0
    tmp12 = tmp10 + tmp11
    tmp13 = tmp2 * tmp12
    tl.store(out_ptr0 + x0, tmp13, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), primals_3, alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_pow_tanh_0[grid(256)](buf0, buf1, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), primals_5, alpha=1, beta=1, out=buf2)
        del primals_4
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), buf0, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
        ), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), reinterpret_tensor(
        primals_1, (4, 64), (1, 4), 0)


def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
        0.044715 * torch.pow(x, 3))))


class Conv1D(nn.Module):

    def __init__(self, nf, nx):
        super(Conv1D, self).__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = Parameter(w)
        self.bias = Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x


class MLPNew(nn.Module):

    def __init__(self, n_state, config):
        super(MLPNew, self).__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = gelu

    def forward(self, input_0):
        primals_3 = self.c_fc.weight
        primals_2 = self.c_fc.bias
        primals_5 = self.c_proj.weight
        primals_4 = self.c_proj.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
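The fused kernel above can also be launched by hand on any 256-element CUDA tensor; a usage sketch, assuming a CUDA device and the definitions from the code above (torch, gelu, grid, and the kernel):

x = torch.randn(256, device='cuda')
out = torch.empty_like(x)
triton_poi_fused_add_mul_pow_tanh_0[grid(256)](x, out, 256, XBLOCK=128,
    num_warps=4, num_stages=1)
assert torch.allclose(out, gelu(x), atol=1e-5)  # kernel matches the eager gelu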
VayerMaking/gpt-2-Pytorch
MLP
false
1902
[ "MIT" ]
0
7bc35f3c1d6c87d1ac306c0f789282b9df59182a
https://github.com/VayerMaking/gpt-2-Pytorch/tree/7bc35f3c1d6c87d1ac306c0f789282b9df59182a
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter


def gelu(x):
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x +
        0.044715 * torch.pow(x, 3))))


class Conv1D(nn.Module):

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = Parameter(w)
        self.bias = Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x


class Model(nn.Module):

    def __init__(self, n_state, config):
        super().__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = gelu

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return h2


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
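Conv1D here follows the GPT-2 convention of storing the weight as (nx, nf), the transpose of nn.Linear's (out, in) layout. A sketch of the equivalence (hypothetical sizes; assumes the Conv1D class defined above):

import torch
import torch.nn as nn

c = Conv1D(8, 4)                    # nf=8 outputs, nx=4 inputs
lin = nn.Linear(4, 8)
with torch.no_grad():
    lin.weight.copy_(c.weight.t())  # transpose into nn.Linear layout
    lin.bias.copy_(c.bias)
x = torch.randn(2, 3, 4)
assert torch.allclose(c(x), lin(x), atol=1e-6)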
BertPredictionHeadTransform
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/k6/ck6o2ucwdqtvjyw7bruyzgade2k6iruvl53t2wmqy2xkgypurpgf.py # Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, u, sub, pow_1, s], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow] # Source node to ATen node mapping: # add => add # erf => erf # hidden_states_1 => mul_1 # mul => mul # pow_1 => pow_1 # s => mean_1 # sub => sub # truediv => div # u => mean # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) triton_poi_fused_add_div_erf_mean_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_add_div_erf_mean_mul_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 
'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mean_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp9 * tmp3 tmp12 = libdevice.erf(tmp11) tmp13 = tmp12 + tmp6 tmp14 = tmp10 * tmp13 tmp15 = tmp8 + tmp14 tmp17 = tmp16 * tmp1 tmp18 = tmp16 * tmp3 tmp19 = libdevice.erf(tmp18) tmp20 = tmp19 + tmp6 tmp21 = tmp17 * tmp20 tmp22 = tmp15 + tmp21 tmp24 = tmp23 * tmp1 tmp25 = tmp23 * tmp3 tmp26 = libdevice.erf(tmp25) tmp27 = tmp26 + tmp6 tmp28 = tmp24 * tmp27 tmp29 = tmp22 + tmp28 tmp30 = 4.0 tmp31 = tmp29 / tmp30 tmp32 = tmp8 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tmp14 - tmp31 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp21 - tmp31 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp28 - tmp31 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp42 / tmp30 tl.store(out_ptr0 + (x0), tmp31, xmask) tl.store(out_ptr1 + (x0), tmp43, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ew/cewcb66a7hyf2vxy6evimdhxxg6p7casfhukvhbgdoijgab2kyck.py # Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, sub, add_1, sqrt, x, mul_2, hidden_states_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.sub, aten.sqrt] # Source node to ATen node mapping: # add => add # add_1 => add_1 # erf => erf # hidden_states_1 => mul_1 # hidden_states_2 => add_2 # mul => mul # mul_2 => mul_2 # sqrt => sqrt # sub => sub # truediv => div # x => div_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mean), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = 
(%mean_1, 1), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %div_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_5), kwargs = {}) triton_poi_fused_add_div_erf_mul_sqrt_sub_1 = async_compile.triton('triton_poi_fused_add_div_erf_mul_sqrt_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_sqrt_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp10 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 0.7071067811865475 tmp5 = tmp1 * tmp4 tmp6 = libdevice.erf(tmp5) tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp3 * tmp8 tmp11 = tmp9 - tmp10 tmp13 = tmp12 + tmp7 tmp14 = libdevice.sqrt(tmp13) tmp15 = tmp11 / tmp14 tmp16 = tmp0 * tmp15 tmp18 = tmp16 + tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source 
Nodes: [hidden_states], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, u, sub, pow_1, s], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.mean, aten.sub, aten.pow] stream0 = get_raw_stream(0) triton_poi_fused_add_div_erf_mean_mul_pow_sub_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_1, sub, add_1, sqrt, x, mul_2, hidden_states_2], Original ATen: [aten.mul, aten.div, aten.erf, aten.add, aten.sub, aten.sqrt] triton_poi_fused_add_div_erf_mul_sqrt_sub_1.run(primals_4, buf0, buf1, buf2, primals_5, buf3, 256, grid=grid(256), stream=stream0) del buf1 del buf2 del primals_5 return (buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
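Per row of the dense output, the first kernel above produces the mean (buf1) and the biased variance (buf2) of the GELU activations; the second kernel recomputes the activation, normalizes, and applies the affine parameters (buf3). The same pipeline in eager PyTorch, as a reference sketch (eps=1.0 comes from layer_norm_eps=1 in this record):

import math
import torch

h = torch.randn(4, 4, 4, 4)                          # dense output
g = h * 0.5 * (1.0 + torch.erf(h / math.sqrt(2.0)))  # fused erf-GELU
u = g.mean(-1, keepdim=True)                         # buf1
s = (g - u).pow(2).mean(-1, keepdim=True)            # buf2
weight, bias, eps = torch.ones(4), torch.zeros(4), 1.0
out = weight * ((g - u) / torch.sqrt(s + eps)) + bias  # buf3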
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn


def gelu(x):
    """Implementation of the gelu activation function.
    For information: OpenAI GPT"s gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


class BertLayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the square root)."""
        super(BertLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias


class BertPredictionHeadTransform(nn.Module):

    def __init__(self, config):
        super(BertPredictionHeadTransform, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.transform_act_fn = gelu
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        """(N, L, D)"""
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4, layer_norm_eps=1)}]
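BertLayerNorm above matches nn.LayerNorm numerically: both use the biased variance and keep eps inside the square root. A quick check (a sketch, using the class defined above):

import torch
import torch.nn as nn

x = torch.randn(4, 4, 4)
ours = BertLayerNorm(4, eps=1e-12)
ref = nn.LayerNorm(4, eps=1e-12)
assert torch.allclose(ours(x), ref(x), atol=1e-6)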
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071067811865475
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tmp10 = tmp9 * tmp1
    tmp11 = tmp9 * tmp3
    tmp12 = libdevice.erf(tmp11)
    tmp13 = tmp12 + tmp6
    tmp14 = tmp10 * tmp13
    tmp15 = tmp8 + tmp14
    tmp17 = tmp16 * tmp1
    tmp18 = tmp16 * tmp3
    tmp19 = libdevice.erf(tmp18)
    tmp20 = tmp19 + tmp6
    tmp21 = tmp17 * tmp20
    tmp22 = tmp15 + tmp21
    tmp24 = tmp23 * tmp1
    tmp25 = tmp23 * tmp3
    tmp26 = libdevice.erf(tmp25)
    tmp27 = tmp26 + tmp6
    tmp28 = tmp24 * tmp27
    tmp29 = tmp22 + tmp28
    tmp30 = 4.0
    tmp31 = tmp29 / tmp30
    tmp32 = tmp8 - tmp31
    tmp33 = tmp32 * tmp32
    tmp34 = tmp14 - tmp31
    tmp35 = tmp34 * tmp34
    tmp36 = tmp33 + tmp35
    tmp37 = tmp21 - tmp31
    tmp38 = tmp37 * tmp37
    tmp39 = tmp36 + tmp38
    tmp40 = tmp28 - tmp31
    tmp41 = tmp40 * tmp40
    tmp42 = tmp39 + tmp41
    tmp43 = tmp42 / tmp30
    tl.store(out_ptr0 + x0, tmp31, xmask)
    tl.store(out_ptr1 + x0, tmp43, xmask)


@triton.jit
def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = 0.7071067811865475
    tmp5 = tmp1 * tmp4
    tmp6 = libdevice.erf(tmp5)
    tmp7 = 1.0
    tmp8 = tmp6 + tmp7
    tmp9 = tmp3 * tmp8
    tmp11 = tmp9 - tmp10
    tmp13 = tmp12 + tmp7
    tmp14 = libdevice.sqrt(tmp13)
    tmp15 = tmp11 / tmp14
    tmp16 = tmp0 * tmp15
    tmp18 = tmp16 + tmp17
    tl.store(out_ptr0 + x2, tmp18, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_erf_mean_mul_pow_sub_0[grid(64)](buf0,
            buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_erf_mul_sqrt_sub_1[grid(256)](primals_4,
            buf0, buf1, buf2, primals_5, buf3, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf1
        del buf2
        del primals_5
    return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf0


def gelu(x):
    """Implementation of the gelu activation function.
    For information: OpenAI GPT"s gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


class BertLayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the square root)."""
        super(BertLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias


class BertPredictionHeadTransformNew(nn.Module):

    def __init__(self, config):
        super(BertPredictionHeadTransformNew, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.transform_act_fn = gelu
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, input_0):
        primals_1 = self.dense.weight
        primals_2 = self.dense.bias
        primals_4 = self.LayerNorm.weight
        primals_5 = self.LayerNorm.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
RoshanTanisha/TVCaption
BertPredictionHeadTransform
false
1903
[ "MIT" ]
0
8b14a340134ec69ed87426ee1f0e93e53f6456e5
https://github.com/RoshanTanisha/TVCaption/tree/8b14a340134ec69ed87426ee1f0e93e53f6456e5
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn


def gelu(x):
    """Implementation of the gelu activation function.
    For information: OpenAI GPT"s gelu is slightly different (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


class BertLayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the square root)."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias


class Model(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.transform_act_fn = gelu
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        """(N, L, D)"""
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
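The erf-based gelu in this record is the exact GELU that torch.nn.functional.gelu computes by default; a one-line sanity check (a sketch, not from the repo):

import math
import torch
import torch.nn.functional as F

x = torch.randn(1000)
exact = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
assert torch.allclose(exact, F.gelu(x), atol=1e-6)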
_MCLSTMCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/7x/c7xa2fzoeg7fkdhz3rijf3negddtnquocgqamwnzmzil4bl2mrsa.py # Topologically Sorted Source Nodes: [ct, norm, add, truediv], Original ATen: [aten.new_zeros, aten.linalg_vector_norm, aten.add, aten.div] # Source node to ATen node mapping: # add => add # ct => full # norm => full_default, pow_2, sum_1 # truediv => div # Graph fragment: # %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%full_default, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 1.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-05), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%full, %add), kwargs = {}) triton_per_fused_add_div_linalg_vector_norm_new_zeros_0 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_new_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {1: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(2,), equal_to_1=(1,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_new_zeros_0', 
'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_linalg_vector_norm_new_zeros_0(out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 4 r2 = (rindex // 4) tmp0 = 0.0 tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 1e-05 tmp5 = tmp3 + tmp4 tmp6 = tmp0 / tmp5 tl.store(out_ptr1 + (tl.broadcast_to(r1 + (12*r2), [XBLOCK, RBLOCK])), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ik/ciktnpigrrv57ihedpahnqmqkyqnrxb2ve44dt2xuj4zd3xovwjp.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_for_fused_1 = async_compile.triton('triton_for_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.foreach( num_warps=8, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'kernel_name': 'triton_for_fused_1', 'mutated_arg_names': [], 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, ) @triton.jit def triton_for_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1): pid = tl.program_id(0) XBLOCK: tl.constexpr = 1024 num_xblocks_0 = tl.cdiv(16, XBLOCK) num_xblocks_1 = num_xblocks_0 + tl.cdiv(16, XBLOCK) if pid < num_xblocks_0: pid_offset = pid xnumel = 16 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tl.store(out_ptr0 + (x0 + (12*x1)), tmp0, xmask) elif pid < num_xblocks_1: pid_offset = pid - num_xblocks_0 xnumel = 16 rnumel = 1 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x5 = xindex x3 = xindex % 4 x4 = (xindex // 4) tmp1 = tl.load(in_ptr1 + (x5), xmask) tl.store(out_ptr1 + (x3 + (12*x4)), tmp1, xmask) 
else: pass ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ro/croozag2ipv2izkskbnusmwfrgzvgxuxdtgvh6cmwihuv2ft2g6q.py # Topologically Sorted Source Nodes: [sigmoid, i], Original ATen: [aten.sigmoid, aten.div] # Source node to ATen node mapping: # i => div_1 # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sigmoid, %expand), kwargs = {}) triton_poi_fused_div_sigmoid_2 = async_compile.triton('triton_poi_fused_div_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tl_math.abs(tmp3) tmp6 = tl.sigmoid(tmp5) tmp7 = tl_math.abs(tmp6) tmp8 = tmp4 + tmp7 tmp10 = tl.sigmoid(tmp9) tmp11 = tl_math.abs(tmp10) tmp12 = tmp8 + tmp11 tmp14 = tl.sigmoid(tmp13) tmp15 = tl_math.abs(tmp14) tmp16 = tmp12 + tmp15 tmp17 = 1e-12 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tmp1 / tmp18 tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/qo/cqohmoojuze4ghdeumxkas5argfq7scxxrrrrmi4ja5aujxqylbm.py # Topologically Sorted Source Nodes: [ct], Original ATen: [aten.new_zeros] # Source node to ATen node mapping: # ct => full # Graph fragment: # %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_zeros_3 = async_compile.triton('triton_poi_fused_new_zeros_3', 
''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_zeros_3(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/sb/csb52ia7nfbux6ajj3tis46aqykq6p4giv3lsll3x4sellt3qcel.py # Topologically Sorted Source Nodes: [relu, r], Original ATen: [aten.relu, aten.div] # Source node to ATen node mapping: # r => div_2 # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %expand_1), kwargs = {}) triton_poi_fused_div_relu_4 = async_compile.triton('triton_poi_fused_div_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_relu_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tl_math.abs(tmp4) tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tl_math.abs(tmp7) tmp9 = tmp5 + tmp8 tmp11 = triton_helpers.maximum(tmp1, tmp10) tmp12 = tl_math.abs(tmp11) tmp13 = tmp9 + tmp12 tmp15 = triton_helpers.maximum(tmp1, tmp14) tmp16 = tl_math.abs(tmp15) tmp17 = tmp13 + tmp16 tmp18 = 1e-12 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = tmp2 / tmp19 tl.store(out_ptr0 + (x2), tmp20, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/7c/c7clas5lw2nusjly52sgckjeygnor3o6xedozxfydrxogcc5ah6f.py # Topologically Sorted Source Nodes: [o, m_new, sub, ct_1, norm_1, add_2], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm] # Source node to ATen node mapping: # add_2 => add_2 # ct_1 => mul_1 # m_new => add_1 # norm_1 => abs_4, pow_8, sum_4 # o => sigmoid_1 # sub => sub # Graph fragment: # %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_2,), kwargs = {}) # %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %squeeze_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {}) # %mul_1 : [num_users=5] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %add_1), kwargs = {}) # %abs_4 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_1,), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_4, None), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 1.0), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_8, 1e-05), kwargs = {}) triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_out_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp6 * tmp2 tmp8 = tl_math.abs(tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tl.store(in_out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp2, None) tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp7, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ui/cuiig7hwoftgd73y5g4nxeusjygas4d43jgp6jstawnsxmuvtesk.py # Topologically Sorted Source Nodes: [features_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # features_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%select_1, %select_5, %div_3], -1), kwargs = {}) triton_poi_fused_cat_6 = async_compile.triton('triton_poi_fused_cat_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp15 = tl.load(in_ptr3 + (0)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 + (4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (16 + (4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/u6/cu64qvgmjs5gpnv6x7okvus6vtjdt7t7hrq67kwq7qihx4y6hhlw.py # Topologically Sorted Source Nodes: [o_1, m_new_1, sub_1, ct_2, norm_2, add_4], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm] # Source node to ATen node mapping: # add_4 => add_4 # ct_2 => mul_3 # m_new_1 => add_3 # norm_2 => abs_7, pow_14, sum_7 # o_1 => sigmoid_3 # sub_1 => sub_1 # Graph fragment: # %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%addmm_5,), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_2, %squeeze_3), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_3), kwargs = {}) # %mul_3 : [num_users=5] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %add_3), kwargs = {}) # %abs_7 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_3,), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_7, None), kwargs = {}) # %pow_14 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_7, 1.0), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_14, 1e-05), kwargs = {}) triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_out_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp6 * tmp2 tmp8 = tl_math.abs(tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tl.store(in_out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp2, None) tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp7, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tl/ctlao5dome6dvekazjdh4pzndckhm5j242u3aretn7zguljoglem.py # Topologically Sorted Source Nodes: [features_2], Original ATen: [aten.cat] # Source node to ATen node mapping: # features_2 => cat_2 # Graph fragment: # %cat_2 : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%select_2, %select_6, %div_6], -1), kwargs = {}) triton_poi_fused_cat_8 = async_compile.triton('triton_poi_fused_cat_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp15 = tl.load(in_ptr3 + (0)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 + (4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (32 + (4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/5e/c5egga74hinxugpzzgffdhddgnaewjqkcaq6bjtgbdqsoepsqhjp.py # Topologically Sorted Source Nodes: [features_3], Original ATen: [aten.cat] # Source node to ATen node mapping: # features_3 => cat_3 # Graph fragment: # %cat_3 : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%select_3, %select_7, %div_9], -1), kwargs = {}) triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = 
(xindex // 12) x2 = xindex tmp15 = tl.load(in_ptr3 + (0)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (48 + (4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (48 + (4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + (x2), tmp21, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/56/c56dlfmd6ph7xm4t43oba7ddqf5xjsaulzwau7eevrwffyi54oh6.py # Topologically Sorted Source Nodes: [m_new_3], Original ATen: [aten.add] # Source node to ATen node mapping: # m_new_3 => add_7 # Graph fragment: # %add_7 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_6, %squeeze_7), kwargs = {}) triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/en/cengjp6catznd72cpbumzf3yqxvgant3mspxbpqyimls3kme6rgd.py # Topologically Sorted Source Nodes: [m_out, c], Original ATen: [aten.stack] # Source node to ATen node mapping: # c => cat_5 # m_out => cat_4 # Graph fragment: # %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %mul_2, %mul_4, %mul_6],), 
kwargs = {}) # %cat_5 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul_1, %mul_3, %mul_5, %mul_7],), kwargs = {}) triton_poi_fused_stack_11 = async_compile.triton('triton_poi_fused_stack_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_stack_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.load(in_ptr1 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp8 = tmp6 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + (4*((-4) + x1))), tmp14 & xmask, other=0.0) tmp16 = tl.sigmoid(tmp15) tmp17 = tl.load(in_ptr3 + (x0 + (4*((-4) + x1))), tmp14 & xmask, other=0.0) tmp18 = tmp16 * tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp14, tmp18, tmp19) tmp21 = tmp0 >= tmp12 tmp22 = tl.full([1], 12, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr4 + (x0 + (4*((-8) + x1))), tmp24 & xmask, other=0.0) tmp26 = tl.sigmoid(tmp25) tmp27 = tl.load(in_ptr5 + (x0 + (4*((-8) + x1))), tmp24 & xmask, other=0.0) tmp28 = tmp26 * tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp0 >= tmp22 tmp32 = tl.full([1], 16, tl.int64) tmp33 = tmp0 < tmp32 tmp34 = tl.load(in_ptr6 + (x0 + (4*((-12) + x1))), tmp31 & xmask, other=0.0) tmp35 = tl.sigmoid(tmp34) tmp36 = tl.load(in_ptr7 + (x0 + (4*((-12) + x1))), tmp31 & xmask, other=0.0) tmp37 = tmp35 * tmp36 
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype) tmp39 = tl.where(tmp31, tmp37, tmp38) tmp40 = tl.where(tmp24, tmp30, tmp39) tmp41 = tl.where(tmp14, tmp20, tmp40) tmp42 = tl.where(tmp4, tmp10, tmp41) tmp43 = tl.load(in_ptr8 + (x0 + (4*x1)), tmp4 & xmask, other=0.0) tmp44 = tl.load(in_ptr9 + (x0 + (4*((-4) + x1))), tmp14 & xmask, other=0.0) tmp45 = tl.load(in_ptr10 + (x0 + (4*((-8) + x1))), tmp24 & xmask, other=0.0) tmp46 = 1.0 tmp47 = tmp46 - tmp35 tmp48 = tmp47 * tmp36 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp31, tmp48, tmp49) tmp51 = tl.where(tmp24, tmp45, tmp50) tmp52 = tl.where(tmp14, tmp44, tmp51) tmp53 = tl.where(tmp4, tmp43, tmp52) tl.store(out_ptr0 + (x2), tmp42, xmask) tl.store(out_ptr1 + (x2), tmp53, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 12), (12, 1)) assert_size_stride(primals_4, (16, ), (1, )) assert_size_stride(primals_5, (16, 12), (12, 1)) assert_size_stride(primals_6, (16, ), (1, )) assert_size_stride(primals_7, (4, 12), (12, 1)) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32) buf3 = reinterpret_tensor(buf4, (4, 4), (12, 1), 8) # alias # Topologically Sorted Source Nodes: [ct, norm, add, truediv], Original ATen: [aten.new_zeros, aten.linalg_vector_norm, aten.add, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_new_zeros_0.run(buf3, 1, 16, grid=grid(1), stream=stream0) buf1 = reinterpret_tensor(buf4, (4, 4), (12, 1), 0) # alias buf2 = reinterpret_tensor(buf4, (4, 4), (12, 1), 4) # alias # Unsorted Source Nodes: [], Original ATen: [] triton_for_fused_1.run(primals_1, primals_2, buf1, buf2, grid=(2, 1, 1), stream=stream0) buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf4, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf5) buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf4, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, i], Original ATen: [aten.sigmoid, aten.div] triton_poi_fused_div_sigmoid_2.run(buf5, buf8, 64, grid=grid(64), stream=stream0) buf9 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid, i, matmul], Original ATen: [aten.sigmoid, aten.div, aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), buf8, out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [ct], Original ATen: [aten.new_zeros] triton_poi_fused_new_zeros_3.run(buf10, 16, grid=grid(16), stream=stream0) buf11 = buf8; del buf8 # reuse # Topologically Sorted 
Source Nodes: [relu, r], Original ATen: [aten.relu, aten.div] triton_poi_fused_div_relu_4.run(buf6, buf11, 64, grid=grid(64), stream=stream0) buf12 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu, r, matmul_1], Original ATen: [aten.relu, aten.div, aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf10, (4, 1, 4), (4, 0, 1), 0), buf11, out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0); del buf12 # reuse buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf15 = empty_strided_cuda((), (), torch.float32) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [o, m_new, sub, ct_1, norm_1, add_2], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm] triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5.run(buf13, buf16, buf9, buf7, buf14, 1, 16, grid=grid(1), stream=stream0) buf17 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [features_1], Original ATen: [aten.cat] triton_poi_fused_cat_6.run(primals_1, primals_2, buf14, buf16, buf17, 48, grid=grid(48), stream=stream0) buf18 = reinterpret_tensor(buf11, (4, 16), (16, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf17, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf18) buf19 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf17, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf19) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu_1, r_1], Original ATen: [aten.relu, aten.div] triton_poi_fused_div_relu_4.run(buf19, buf20, 64, grid=grid(64), stream=stream0) buf21 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [linear_5], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf17, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf21) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_2, i_1], Original ATen: [aten.sigmoid, aten.div] triton_poi_fused_div_sigmoid_2.run(buf18, buf22, 64, grid=grid(64), stream=stream0) buf23 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_2, i_1, matmul_2], Original ATen: [aten.sigmoid, aten.div, aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 16), buf22, out=buf23) buf24 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf14, (4, 1, 4), (4, 4, 1), 0), buf20, out=buf24) buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0); del buf23 # reuse buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf27 = empty_strided_cuda((), (), torch.float32) buf28 = buf27; del buf27 # reuse # Topologically Sorted Source Nodes: [o_1, m_new_1, sub_1, ct_2, norm_2, add_4], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm] triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7.run(buf25, buf28, buf24, buf21, buf26, 1, 16, grid=grid(1), stream=stream0) buf29 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [features_2], 
Original ATen: [aten.cat] triton_poi_fused_cat_8.run(primals_1, primals_2, buf26, buf28, buf29, 48, grid=grid(48), stream=stream0) buf30 = reinterpret_tensor(buf22, (4, 16), (16, 1), 0); del buf22 # reuse # Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf29, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf30) buf31 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf29, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf31) buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu_2, r_2], Original ATen: [aten.relu, aten.div] triton_poi_fused_div_relu_4.run(buf31, buf32, 64, grid=grid(64), stream=stream0) buf33 = reinterpret_tensor(buf24, (4, 4), (4, 1), 0); del buf24 # reuse # Topologically Sorted Source Nodes: [linear_8], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf29, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf33) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_4, i_2], Original ATen: [aten.sigmoid, aten.div] triton_poi_fused_div_sigmoid_2.run(buf30, buf34, 64, grid=grid(64), stream=stream0) buf35 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_4, i_2, matmul_4], Original ATen: [aten.sigmoid, aten.div, aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 32), buf34, out=buf35) buf36 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf26, (4, 1, 4), (4, 4, 1), 0), buf32, out=buf36) buf37 = reinterpret_tensor(buf35, (4, 4), (4, 1), 0); del buf35 # reuse buf38 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf39 = empty_strided_cuda((), (), torch.float32) buf40 = buf39; del buf39 # reuse # Topologically Sorted Source Nodes: [o_2, m_new_2, sub_2, ct_3, norm_3, add_6], Original ATen: [aten.sigmoid, aten.add, aten.rsub, aten.mul, aten.linalg_vector_norm] triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7.run(buf37, buf40, buf36, buf33, buf38, 1, 16, grid=grid(1), stream=stream0) buf41 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [features_3], Original ATen: [aten.cat] triton_poi_fused_cat_9.run(primals_1, primals_2, buf38, buf40, buf41, 48, grid=grid(48), stream=stream0) del primals_2 buf42 = reinterpret_tensor(buf34, (4, 16), (16, 1), 0); del buf34 # reuse # Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf41, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf42) del primals_4 buf43 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_10], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf41, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf43) del primals_6 buf44 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [relu_3, r_3], Original ATen: [aten.relu, aten.div] triton_poi_fused_div_relu_4.run(buf43, buf44, 64, grid=grid(64), stream=stream0) buf45 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0); del buf36 # reuse # 
Topologically Sorted Source Nodes: [linear_11], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf41, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf45) del primals_8 buf46 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_6, i_3], Original ATen: [aten.sigmoid, aten.div] triton_poi_fused_div_sigmoid_2.run(buf42, buf46, 64, grid=grid(64), stream=stream0) buf47 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigmoid_6, i_3, matmul_6], Original ATen: [aten.sigmoid, aten.div, aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 48), buf46, out=buf47) buf48 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf38, (4, 1, 4), (4, 4, 1), 0), buf44, out=buf48) buf49 = reinterpret_tensor(buf47, (4, 4), (4, 1), 0); del buf47 # reuse # Topologically Sorted Source Nodes: [m_new_3], Original ATen: [aten.add] triton_poi_fused_add_10.run(buf49, buf48, 16, grid=grid(16), stream=stream0) del buf48 buf50 = reinterpret_tensor(buf46, (16, 4), (4, 1), 0); del buf46 # reuse buf51 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [m_out, c], Original ATen: [aten.stack] triton_poi_fused_stack_11.run(buf7, buf13, buf21, buf25, buf33, buf37, buf45, buf49, buf14, buf26, buf38, buf50, buf51, 64, grid=grid(64), stream=stream0) return (reinterpret_tensor(buf50, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf51, (4, 4, 4), (16, 4, 1), 0), buf4, buf5, buf6, buf7, buf13, buf14, buf16, buf17, buf18, buf19, buf20, buf21, buf25, buf26, buf28, buf29, buf30, buf31, buf32, buf33, buf37, buf38, buf40, buf41, buf42, buf43, buf44, buf45, buf49, reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 48), primals_7, primals_5, primals_3, reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 32), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 16), reinterpret_tensor(buf10, (4, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
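# A minimal eager-mode sketch of what the fused reduction kernels above
# (triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5 and _7) compute
# per MC-LSTM step; the helper name is illustrative, not generated code.
# Each launch sums the two mass terms, keeps the (1 - o) share in the cell,
# and emits the L1 norm + 1e-05 used to normalise `c` for the next step.
import torch

def fused_step_reference(m_in, m_sys, o_logits, eps=1e-05):
    m_new = m_in + m_sys                          # written back to in_out_ptr0
    ct = (1.0 - torch.sigmoid(o_logits)) * m_new  # written to out_ptr0
    denom = ct.abs().sum() + eps                  # scalar, stored in in_out_ptr1
    return m_new, ct, denom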
from _paritybench_helpers import _mock_config import torch import torch.nn as nn from typing import Tuple class _Gate(nn.Module): """Utility class to implement a standard sigmoid gate""" def __init__(self, in_features: 'int', out_features: 'int'): super(_Gate, self).__init__() self.fc = nn.Linear(in_features=in_features, out_features=out_features) self._reset_parameters() def _reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def forward(self, x: 'torch.Tensor') ->torch.Tensor: """Perform forward pass through the normalised gate""" return torch.sigmoid(self.fc(x)) class _NormalizedGate(nn.Module): """Utility class to implement a gate with normalised activation function""" def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]', normalizer: 'str'): super(_NormalizedGate, self).__init__() self.fc = nn.Linear(in_features=in_features, out_features=out_shape [0] * out_shape[1]) self.out_shape = out_shape if normalizer == 'normalized_sigmoid': self.activation = nn.Sigmoid() elif normalizer == 'normalized_relu': self.activation = nn.ReLU() else: raise ValueError( f"Unknown normalizer {normalizer}. Must be one of {'normalized_sigmoid', 'normalized_relu'}" ) self._reset_parameters() def _reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def forward(self, x: 'torch.Tensor') ->torch.Tensor: """Perform forward pass through the normalized gate""" h = self.fc(x).view(-1, *self.out_shape) return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1) class _MCLSTMCell(nn.Module): """The logic of the MC-LSTM cell""" def __init__(self, mass_input_size: 'int', aux_input_size: 'int', hidden_size: 'int', cfg: 'Config'): super(_MCLSTMCell, self).__init__() self.cfg = cfg self._hidden_size = hidden_size gate_inputs = aux_input_size + hidden_size + mass_input_size self.output_gate = _Gate(in_features=gate_inputs, out_features= hidden_size) self.input_gate = _NormalizedGate(in_features=gate_inputs, out_shape=(mass_input_size, hidden_size), normalizer= 'normalized_sigmoid') self.redistribution = _NormalizedGate(in_features=gate_inputs, out_shape=(hidden_size, hidden_size), normalizer='normalized_relu') self._reset_parameters() def _reset_parameters(self): if self.cfg.initial_forget_bias is not None: nn.init.constant_(self.output_gate.fc.bias, val=self.cfg. initial_forget_bias) def forward(self, x_m: 'torch.Tensor', x_a: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """Perform forward pass on the MC-LSTM cell. Parameters ---------- x_m : torch.Tensor Mass input that will be conserved by the network. x_a : torch.Tensor Auxiliary inputs that will be used to modulate the gates but whose information won't be stored internally in the MC-LSTM cells. Returns ------- Tuple[torch.Tensor, torch.Tensor] Outgoing mass and memory cells per time step of shape [sequence length, batch size, hidden size] """ _, batch_size, _ = x_m.size() ct = x_m.new_zeros((batch_size, self._hidden_size)) m_out, c = [], [] for xt_m, xt_a in zip(x_m, x_a): mt_out, ct = self._step(xt_m, xt_a, ct) m_out.append(mt_out) c.append(ct) m_out, c = torch.stack(m_out), torch.stack(c) return m_out, c def _step(self, xt_m, xt_a, c): """ Make a single time step in the MCLSTM. 
""" features = torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1) i = self.input_gate(features) r = self.redistribution(features) o = self.output_gate(features) m_in = torch.matmul(xt_m.unsqueeze(-2), i).squeeze(-2) m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2) m_new = m_in + m_sys return o * m_new, (1 - o) * m_new def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'mass_input_size': 4, 'aux_input_size': 4, 'hidden_size': 4, 'cfg': _mock_config(initial_forget_bias=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from typing import Tuple assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_linalg_vector_norm_new_zeros_0(out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex % 4 r2 = rindex // 4 tmp0 = 0.0 tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp4 = 1e-05 tmp5 = tmp3 + tmp4 tmp6 = tmp0 / tmp5 tl.store(out_ptr1 + tl.broadcast_to(r1 + 12 * r2, [XBLOCK, RBLOCK]), tmp6, None) @triton.jit def triton_for_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1): pid = tl.program_id(0) XBLOCK: tl.constexpr = 1024 num_xblocks_0 = tl.cdiv(16, XBLOCK) num_xblocks_1 = num_xblocks_0 + tl.cdiv(16, XBLOCK) if pid < num_xblocks_0: pid_offset = pid xnumel = 16 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 12 * x1), tmp0, xmask) elif pid < num_xblocks_1: pid_offset = pid - num_xblocks_0 xnumel = 16 xoffset = pid_offset * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x5 = xindex x3 = xindex % 4 x4 = xindex // 4 tmp1 = tl.load(in_ptr1 + x5, xmask) tl.store(out_ptr1 + (x3 + 12 * x4), tmp1, xmask) else: pass @triton.jit def triton_poi_fused_div_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.sigmoid(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tl_math.abs(tmp3) tmp6 = tl.sigmoid(tmp5) tmp7 = tl_math.abs(tmp6) tmp8 = tmp4 + tmp7 tmp10 = tl.sigmoid(tmp9) tmp11 = tl_math.abs(tmp10) tmp12 = tmp8 + tmp11 tmp14 = tl.sigmoid(tmp13) tmp15 = tl_math.abs(tmp14) tmp16 = tmp12 + tmp15 tmp17 = 1e-12 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tmp1 / tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) @triton.jit def triton_poi_fused_new_zeros_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_div_relu_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tl_math.abs(tmp4) tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tl_math.abs(tmp7) tmp9 = tmp5 + tmp8 tmp11 = triton_helpers.maximum(tmp1, tmp10) tmp12 = tl_math.abs(tmp11) tmp13 = tmp9 + tmp12 tmp15 = triton_helpers.maximum(tmp1, tmp14) tmp16 = tl_math.abs(tmp15) tmp17 = tmp13 + tmp16 tmp18 = 1e-12 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = tmp2 / tmp19 tl.store(out_ptr0 + x2, tmp20, xmask) @triton.jit def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_out_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp6 * tmp2 tmp8 = tl_math.abs(tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp2, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None) @triton.jit def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp15 = tl.load(in_ptr3 + 0) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (16 + 4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (16 + 4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_out_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp6 * tmp2 tmp8 = tl_math.abs(tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = 1e-05 tmp13 = tmp11 + tmp12 tl.store(in_out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp2, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp7, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None) @triton.jit def triton_poi_fused_cat_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp15 = tl.load(in_ptr3 + 0) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (32 + 4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (32 + 4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp15 = tl.load(in_ptr3 + 0) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (48 + 4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (48 + 4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tmp14 / tmp16 tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype) tmp19 = tl.where(tmp11, tmp17, tmp18) tmp20 = tl.where(tmp9, tmp10, tmp19) tmp21 = tl.where(tmp4, tmp5, tmp20) tl.store(out_ptr0 + x2, tmp21, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_stack_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, out_ptr0, out_ptr1, 
xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0) tmp6 = tl.sigmoid(tmp5) tmp7 = tl.load(in_ptr1 + (x0 + 4 * x1), tmp4 & xmask, other=0.0) tmp8 = tmp6 * tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0) tmp16 = tl.sigmoid(tmp15) tmp17 = tl.load(in_ptr3 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0) tmp18 = tmp16 * tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp14, tmp18, tmp19) tmp21 = tmp0 >= tmp12 tmp22 = tl.full([1], 12, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr4 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0) tmp26 = tl.sigmoid(tmp25) tmp27 = tl.load(in_ptr5 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0) tmp28 = tmp26 * tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp0 >= tmp22 tl.full([1], 16, tl.int64) tmp34 = tl.load(in_ptr6 + (x0 + 4 * (-12 + x1)), tmp31 & xmask, other=0.0) tmp35 = tl.sigmoid(tmp34) tmp36 = tl.load(in_ptr7 + (x0 + 4 * (-12 + x1)), tmp31 & xmask, other=0.0) tmp37 = tmp35 * tmp36 tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype) tmp39 = tl.where(tmp31, tmp37, tmp38) tmp40 = tl.where(tmp24, tmp30, tmp39) tmp41 = tl.where(tmp14, tmp20, tmp40) tmp42 = tl.where(tmp4, tmp10, tmp41) tmp43 = tl.load(in_ptr8 + (x0 + 4 * x1), tmp4 & xmask, other=0.0) tmp44 = tl.load(in_ptr9 + (x0 + 4 * (-4 + x1)), tmp14 & xmask, other=0.0) tmp45 = tl.load(in_ptr10 + (x0 + 4 * (-8 + x1)), tmp24 & xmask, other=0.0) tmp46 = 1.0 tmp47 = tmp46 - tmp35 tmp48 = tmp47 * tmp36 tmp49 = tl.full(tmp48.shape, 0.0, tmp48.dtype) tmp50 = tl.where(tmp31, tmp48, tmp49) tmp51 = tl.where(tmp24, tmp45, tmp50) tmp52 = tl.where(tmp14, tmp44, tmp51) tmp53 = tl.where(tmp4, tmp43, tmp52) tl.store(out_ptr0 + x2, tmp42, xmask) tl.store(out_ptr1 + x2, tmp53, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (16, 12), (12, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (16, 12), (12, 1)) assert_size_stride(primals_6, (16,), (1,)) assert_size_stride(primals_7, (4, 12), (12, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf4 = empty_strided_cuda((4, 12), (12, 1), torch.float32) buf3 = reinterpret_tensor(buf4, (4, 4), (12, 1), 8) get_raw_stream(0) triton_per_fused_add_div_linalg_vector_norm_new_zeros_0[grid(1)](buf3, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf1 = reinterpret_tensor(buf4, (4, 4), (12, 1), 0) buf2 = reinterpret_tensor(buf4, (4, 4), (12, 1), 4) triton_for_fused_1[2, 1, 1](primals_1, primals_2, buf1, buf2, num_warps=8, num_stages=1) buf5 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_4, buf4, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf5) buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_6, buf4, 
reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf6) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf7) buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_sigmoid_2[grid(64)](buf5, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), buf8, out=buf9) buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_new_zeros_3[grid(16)](buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = buf8 del buf8 triton_poi_fused_div_relu_4[grid(64)](buf6, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf10, (4, 1, 4), (4, 0, 1), 0), buf11, out=buf12) buf13 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0) del buf12 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf15 = empty_strided_cuda((), (), torch.float32) buf16 = buf15 del buf15 triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_5[grid(1)]( buf13, buf16, buf9, buf7, buf14, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf17 = empty_strided_cuda((4, 12), (12, 1), torch.float32) triton_poi_fused_cat_6[grid(48)](primals_1, primals_2, buf14, buf16, buf17, 48, XBLOCK=64, num_warps=1, num_stages=1) buf18 = reinterpret_tensor(buf11, (4, 16), (16, 1), 0) del buf11 extern_kernels.addmm(primals_4, buf17, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf18) buf19 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_6, buf17, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf19) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_relu_4[grid(64)](buf19, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1) buf21 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_8, buf17, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf21) buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_sigmoid_2[grid(64)](buf18, buf22, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf23 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 16), buf22, out=buf23) buf24 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf14, (4, 1, 4), (4, 4, 1), 0), buf20, out=buf24) buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0) del buf23 buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf27 = empty_strided_cuda((), (), torch.float32) buf28 = buf27 del buf27 triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7[grid(1)]( buf25, buf28, buf24, buf21, buf26, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf29 = empty_strided_cuda((4, 12), (12, 1), torch.float32) triton_poi_fused_cat_8[grid(48)](primals_1, primals_2, buf26, buf28, buf29, 48, XBLOCK=64, num_warps=1, num_stages=1) buf30 = reinterpret_tensor(buf22, (4, 16), (16, 1), 0) del buf22 extern_kernels.addmm(primals_4, buf29, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf30) buf31 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_6, buf29, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, 
out=buf31) buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_relu_4[grid(64)](buf31, buf32, 64, XBLOCK=64, num_warps=1, num_stages=1) buf33 = reinterpret_tensor(buf24, (4, 4), (4, 1), 0) del buf24 extern_kernels.addmm(primals_8, buf29, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf33) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_sigmoid_2[grid(64)](buf30, buf34, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf35 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 32), buf34, out=buf35) buf36 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf26, (4, 1, 4), (4, 4, 1), 0), buf32, out=buf36) buf37 = reinterpret_tensor(buf35, (4, 4), (4, 1), 0) del buf35 buf38 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf39 = empty_strided_cuda((), (), torch.float32) buf40 = buf39 del buf39 triton_per_fused_add_linalg_vector_norm_mul_rsub_sigmoid_7[grid(1)]( buf37, buf40, buf36, buf33, buf38, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf41 = empty_strided_cuda((4, 12), (12, 1), torch.float32) triton_poi_fused_cat_9[grid(48)](primals_1, primals_2, buf38, buf40, buf41, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf42 = reinterpret_tensor(buf34, (4, 16), (16, 1), 0) del buf34 extern_kernels.addmm(primals_4, buf41, reinterpret_tensor(primals_3, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf42) del primals_4 buf43 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_6, buf41, reinterpret_tensor(primals_5, (12, 16), (1, 12), 0), alpha=1, beta=1, out=buf43) del primals_6 buf44 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_relu_4[grid(64)](buf43, buf44, 64, XBLOCK=64, num_warps=1, num_stages=1) buf45 = reinterpret_tensor(buf36, (4, 4), (4, 1), 0) del buf36 extern_kernels.addmm(primals_8, buf41, reinterpret_tensor(primals_7, (12, 4), (1, 12), 0), alpha=1, beta=1, out=buf45) del primals_8 buf46 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_div_sigmoid_2[grid(64)](buf42, buf46, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf47 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 48), buf46, out=buf47) buf48 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf38, (4, 1, 4), (4, 4, 1), 0), buf44, out=buf48) buf49 = reinterpret_tensor(buf47, (4, 4), (4, 1), 0) del buf47 triton_poi_fused_add_10[grid(16)](buf49, buf48, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf48 buf50 = reinterpret_tensor(buf46, (16, 4), (4, 1), 0) del buf46 buf51 = empty_strided_cuda((16, 4), (4, 1), torch.float32) triton_poi_fused_stack_11[grid(64)](buf7, buf13, buf21, buf25, buf33, buf37, buf45, buf49, buf14, buf26, buf38, buf50, buf51, 64, XBLOCK=64, num_warps=1, num_stages=1) return (reinterpret_tensor(buf50, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf51, (4, 4, 4), (16, 4, 1), 0), buf4, buf5, buf6, buf7, buf13, buf14, buf16, buf17, buf18, buf19, buf20, buf21, buf25, buf26, buf28, buf29, buf30, buf31, buf32, buf33, buf37, buf38, buf40, buf41, buf42, buf43, buf44, buf45, buf49, reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 48), primals_7, primals_5, primals_3, reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 32), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 16), 
reinterpret_tensor(buf10, (4, 4, 1), (4, 1, 4), 0), reinterpret_tensor(primals_1, (4, 4, 1), (4, 1, 4), 0)) class _Gate(nn.Module): """Utility class to implement a standard sigmoid gate""" def __init__(self, in_features: 'int', out_features: 'int'): super(_Gate, self).__init__() self.fc = nn.Linear(in_features=in_features, out_features=out_features) self._reset_parameters() def _reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def forward(self, x: 'torch.Tensor') ->torch.Tensor: """Perform forward pass through the normalised gate""" return torch.sigmoid(self.fc(x)) class _NormalizedGate(nn.Module): """Utility class to implement a gate with normalised activation function""" def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]', normalizer: 'str'): super(_NormalizedGate, self).__init__() self.fc = nn.Linear(in_features=in_features, out_features=out_shape [0] * out_shape[1]) self.out_shape = out_shape if normalizer == 'normalized_sigmoid': self.activation = nn.Sigmoid() elif normalizer == 'normalized_relu': self.activation = nn.ReLU() else: raise ValueError( f"Unknown normalizer {normalizer}. Must be one of {'normalized_sigmoid', 'normalized_relu'}" ) self._reset_parameters() def _reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def forward(self, x: 'torch.Tensor') ->torch.Tensor: """Perform forward pass through the normalized gate""" h = self.fc(x).view(-1, *self.out_shape) return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1) class _MCLSTMCellNew(nn.Module): """The logic of the MC-LSTM cell""" def __init__(self, mass_input_size: 'int', aux_input_size: 'int', hidden_size: 'int', cfg: 'Config'): super(_MCLSTMCellNew, self).__init__() self.cfg = cfg self._hidden_size = hidden_size gate_inputs = aux_input_size + hidden_size + mass_input_size self.output_gate = _Gate(in_features=gate_inputs, out_features= hidden_size) self.input_gate = _NormalizedGate(in_features=gate_inputs, out_shape=(mass_input_size, hidden_size), normalizer= 'normalized_sigmoid') self.redistribution = _NormalizedGate(in_features=gate_inputs, out_shape=(hidden_size, hidden_size), normalizer='normalized_relu') self._reset_parameters() def _reset_parameters(self): if self.cfg.initial_forget_bias is not None: nn.init.constant_(self.output_gate.fc.bias, val=self.cfg. initial_forget_bias) def _step(self, xt_m, xt_a, c): """ Make a single time step in the MCLSTM. """ features = torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1) i = self.input_gate(features) r = self.redistribution(features) o = self.output_gate(features) m_in = torch.matmul(xt_m.unsqueeze(-2), i).squeeze(-2) m_sys = torch.matmul(c.unsqueeze(-2), r).squeeze(-2) m_new = m_in + m_sys return o * m_new, (1 - o) * m_new def forward(self, input_0, input_1): primals_7 = self.output_gate.fc.weight primals_8 = self.output_gate.fc.bias primals_3 = self.input_gate.fc.weight primals_4 = self.input_gate.fc.bias primals_5 = self.redistribution.fc.weight primals_6 = self.redistribution.fc.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
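# A hedged usage sketch for the compiled wrapper above; `_run_compiled_cell`
# is an illustrative name, a CUDA device is assumed (the `call` graph pins
# every buffer to cuda:0), and the config argument reuses `_mock_config` from
# _paritybench_helpers as in the reference implementation.
def _run_compiled_cell():
    from _paritybench_helpers import _mock_config
    cell = _MCLSTMCellNew(mass_input_size=4, aux_input_size=4, hidden_size=4,
        cfg=_mock_config(initial_forget_bias=4)).cuda()
    x_m = torch.rand(4, 4, 4, device='cuda')
    x_a = torch.rand(4, 4, 4, device='cuda')
    m_out, c = cell(x_m, x_a)
    print(m_out.shape, c.shape)  # both torch.Size([4, 4, 4])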
NHoose/neuralhydrology
_MCLSTMCell
false
1904
[ "BSD-3-Clause" ]
0
f320b417fe747a923ff8ef685ad33fd8b34effad
https://github.com/NHoose/neuralhydrology/tree/f320b417fe747a923ff8ef685ad33fd8b34effad
from _paritybench_helpers import _mock_config import torch import torch.nn as nn from typing import Tuple class _Gate(nn.Module): """Utility class to implement a standard sigmoid gate""" def __init__(self, in_features: 'int', out_features: 'int'): super().__init__() self.fc = nn.Linear(in_features=in_features, out_features=out_features) self._reset_parameters() def _reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def forward(self, x: 'torch.Tensor') ->torch.Tensor: """Perform forward pass through the normalised gate""" return torch.sigmoid(self.fc(x)) class _NormalizedGate(nn.Module): """Utility class to implement a gate with normalised activation function""" def __init__(self, in_features: 'int', out_shape: 'Tuple[int, int]', normalizer: 'str'): super().__init__() self.fc = nn.Linear(in_features=in_features, out_features=out_shape [0] * out_shape[1]) self.out_shape = out_shape if normalizer == 'normalized_sigmoid': self.activation = nn.Sigmoid() elif normalizer == 'normalized_relu': self.activation = nn.ReLU() else: raise ValueError( f"Unknown normalizer {normalizer}. Must be one of {'normalized_sigmoid', 'normalized_relu'}" ) self._reset_parameters() def _reset_parameters(self): nn.init.orthogonal_(self.fc.weight) nn.init.zeros_(self.fc.bias) def forward(self, x: 'torch.Tensor') ->torch.Tensor: """Perform forward pass through the normalized gate""" h = self.fc(x).view(-1, *self.out_shape) return torch.nn.functional.normalize(self.activation(h), p=1, dim=-1) class Model(nn.Module): """The logic of the MC-LSTM cell""" def __init__(self, mass_input_size: 'int', aux_input_size: 'int', hidden_size: 'int', cfg: 'Config'): super().__init__() self.cfg = cfg self._hidden_size = hidden_size gate_inputs = aux_input_size + hidden_size + mass_input_size self.output_gate = _Gate(in_features=gate_inputs, out_features= hidden_size) self.input_gate = _NormalizedGate(in_features=gate_inputs, out_shape=(mass_input_size, hidden_size), normalizer= 'normalized_sigmoid') self.redistribution = _NormalizedGate(in_features=gate_inputs, out_shape=(hidden_size, hidden_size), normalizer='normalized_relu') self._reset_parameters() def _reset_parameters(self): if self.cfg.initial_forget_bias is not None: nn.init.constant_(self.output_gate.fc.bias, val=self.cfg. initial_forget_bias) def forward(self, x_m: 'torch.Tensor', x_a: 'torch.Tensor') ->Tuple[ torch.Tensor, torch.Tensor]: """Perform forward pass on the MC-LSTM cell. Parameters ---------- x_m : torch.Tensor Mass input that will be conserved by the network. x_a : torch.Tensor Auxiliary inputs that will be used to modulate the gates but whose information won't be stored internally in the MC-LSTM cells. Returns ------- Tuple[torch.Tensor, torch.Tensor] Outgoing mass and memory cells per time step of shape [sequence length, batch size, hidden size] """ _, batch_size, _ = x_m.size() ct = x_m.new_zeros((batch_size, self._hidden_size)) m_out, c = [], [] for xt_m, xt_a in zip(x_m, x_a): mt_out, ct = self._step(xt_m, xt_a, ct) m_out.append(mt_out) c.append(ct) m_out, c = torch.stack(m_out), torch.stack(c) return m_out, c def _step(self, xt_m, xt_a, c): """ Make a single time step in the MCLSTM. """ features = torch.cat([xt_m, xt_a, c / (c.norm(1) + 1e-05)], dim=-1) i = self.input # ... truncated (>4000 chars) for memory efficiency
SelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/wx/cwxwvlntewdrqi2r4caciy5ht4jdvafnhtiqncr4lo4aegcb4imz.py # Topologically Sorted Source Nodes: [attention_probs_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_probs_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_8, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_8, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/fj/cfjl47pvhwbpfbvh6rfehwy5ijxc5p3zgkld2lwf3mw5bl6pbkak.py # Topologically Sorted Source Nodes: [attention_probs_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_probs_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/2u/c2uuzth4bcn72thtw5kkgniu5d2zotoji25lb6ct6msw2er4pyfr.py # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.sum] # Source node to ATen node mapping: # attention_scores 
=> sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%div, [-2]), kwargs = {}) triton_poi_fused_sum_2 = async_compile.triton('triton_poi_fused_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [query_layer], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [key_layer], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [value_layer], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, 
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_probs_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf3, buf4, 256, grid=grid(256), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [attention_probs_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.sum] triton_poi_fused_sum_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf7) return (reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf5, reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
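The two `_softmax` kernels above implement the standard numerically stable split named in their graph fragments: pass one computes exp(x - amax(x)) per row, pass two divides by the row sum. A plain-PyTorch sketch of the same two passes:

import torch

x = torch.rand(16, 4, 4)
shifted = (x - x.amax(dim=-1, keepdim=True)).exp()   # pass 1: amax, sub, exp
probs = shifted / shifted.sum(dim=-1, keepdim=True)  # pass 2: sum, div
assert torch.allclose(probs, torch.softmax(x, dim=-1), atol=1e-06)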
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class SelfAttention(nn.Module): def __init__(self, config): super().__init__() self.query = nn.Linear(config.hidden_size, config.hidden_size) self.key = nn.Linear(config.hidden_size, config.hidden_size) self.value = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states): query_layer = self.query(hidden_states) key_layer = self.key(hidden_states) value_layer = self.value(hidden_states) attention_probs = torch.matmul(query_layer, key_layer.transpose(-1, -2) ) attention_probs = nn.Softmax(dim=-1)(attention_probs) attention_scores = torch.sum(attention_probs, dim=-2) context_layer = torch.matmul(attention_probs, value_layer) return context_layer, attention_scores def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4)}]
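A minimal sketch driving `SelfAttention` through the helper functions above:

args, kwargs = get_init_inputs()
model = SelfAttention(*args, **kwargs)
context_layer, attention_scores = model(*get_inputs())
print(context_layer.shape)     # torch.Size([4, 4, 4, 4])
print(attention_scores.shape)  # torch.Size([4, 4, 4]), probs summed over dim=-2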
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_sum_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) 
del primals_4 del primals_5 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf2) del primals_6 del primals_7 buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_sum_2[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0) del buf4 extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf7) return reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf5, reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0 ), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) class SelfAttentionNew(nn.Module): def __init__(self, config): super().__init__() self.query = nn.Linear(config.hidden_size, config.hidden_size) self.key = nn.Linear(config.hidden_size, config.hidden_size) self.value = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, input_0): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
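A hedged parity check between the eager module and the compiled one; it assumes a CUDA device (since `call` allocates on cuda:0) and that `SelfAttention` from the record above is importable alongside `SelfAttentionNew`.

import torch
from _paritybench_helpers import _mock_config

config = _mock_config(hidden_size=4)
eager = SelfAttention(config).cuda()
compiled = SelfAttentionNew(config).cuda()
compiled.load_state_dict(eager.state_dict())  # identical weights
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    for a, b in zip(eager(x), compiled(x)):
        assert torch.allclose(a, b, atol=1e-05)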
Zaaachary/CSQA
SelfAttention
false
1905
[ "BSD-3-Clause" ]
0
6da6e076f67e9458deacb665d31463db14c7d860
https://github.com/Zaaachary/CSQA/tree/6da6e076f67e9458deacb665d31463db14c7d860
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.query = nn.Linear(config.hidden_size, config.hidden_size)
        self.key = nn.Linear(config.hidden_size, config.hidden_size)
        self.value = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        query_layer = self.query(hidden_states)
        key_layer = self.key(hidden_states)
        value_layer = self.value(hidden_states)
        attention_probs = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_probs = nn.Softmax(dim=-1)(attention_probs)
        attention_scores = torch.sum(attention_probs, dim=-2)
        context_layer = torch.matmul(attention_probs, value_layer)
        return context_layer, attention_scores


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4)}]
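As a worked check of what `attention_scores` means: summing the softmax probabilities over the query axis (dim=-2) gives the total attention mass each key position receives, so its entries sum to the number of query positions (4 here). A short sketch:

import torch

args, kwargs = get_init_inputs()
model = Model(*args, **kwargs)
_, scores = model(torch.rand(4, 4, 4, 4))
# Each of the 4 softmax rows sums to 1, so the received mass totals 4.0:
assert torch.allclose(scores.sum(-1), torch.full_like(scores.sum(-1), 4.0),
    atol=1e-05)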
VGGBase
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/f7/cf7tayhctr3m6ezk7xezotpdlc5h4drokdkbz4vy2pfkbdxnmn4q.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_6/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/xq/cxq75w43anllid5ys7ss3yyizuoeph3vvaqlvm5lo434hrywtyle.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/32/c32xiwptfqtyhbnde262mvq5tzywzo6zquurttkv7sztqnze6yni.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/jj/cjjz4tpbucpuc3faa2ky32crfwhb5fbnssd6o2yfkgdcjg2acfmo.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32768 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tg/ctgdsxjd3rciejxtjvi3y2w5fmmggh5lm3mivuygvkdzeb3zulmc.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/e7/ce7jqsdrj5poslb2hpufqd2wdux5xiab5n2auqal3ztzvkzrmnzl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, 
math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 131072 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ks/ckso6iiq5yfqfxmx7ilr6ufrmz6mlkiy75pexzhyf3ierq4pu3zl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 262144 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/cq/ccq66rrhrzjmgxnrmkqjfjou7btyc5dncveqmqkrdoivqkmduchd.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_9 = async_compile.triton('triton_poi_fused_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 524288 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/y7/cy74ayecev2pcofz3fyu6lc473nqeaato7assx62kzcpdkdyzi7o.py # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # out => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), 
kwargs = {}) triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/mv/cmvofpunraye55pqf22y3ewvph2z6nefokvusriez7hf4qcucdfo.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_2 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_11 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_max_pool2d_with_indices_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = (xindex // 64) % 32 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (128*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + (128*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/n3/cn34mbt2rtob3eeqb7butchvtwaa2lxs5ritiirymjwyzcwqeits.py # Topologically Sorted Source Nodes: [conv2d_2, out_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # out_3 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_12 = async_compile.triton('triton_poi_fused_convolution_relu_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/il/cilq2hip74d6rz7ttvmpmzknbqn3td7uoov3rzjb5ny3apynoqme.py # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_5 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_13 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) % 16 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (256*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4224 + x0 + (256*x1) + (8192*x2)), None) tmp2 = 
triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/r4/cr4cxr5slxie5num5fkjya5y6p2mpesokrymomcbss4ipccdadwk.py # Topologically Sorted Source Nodes: [conv2d_4, out_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # out_6 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_14 = async_compile.triton('triton_poi_fused_convolution_relu_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/n3/cn35qanq7ew2y4riv4ein355sody4dyznrtk6o5akgf2oqgx5ok7.py # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_9 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = 
(%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_15 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = (xindex // 256) % 8 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (8192*x2)), None) tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (8192*x2)), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + (512*x1) + (8192*x2)), None) tmp5 = tl.load(in_ptr0 + (4352 + x0 + (512*x1) + (8192*x2)), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/63/c63ymadmqa5pewt6lz2e5vbnqla654yqubhkwemi5viikn2tjwlb.py # Topologically Sorted Source Nodes: [conv2d_7, out_10], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_7 => convolution_7 # out_10 => relu_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) triton_poi_fused_convolution_relu_16 = 
async_compile.triton('triton_poi_fused_convolution_relu_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/zu/czunwyy22bkt66zyeary3r6wtcheigfh75hfciirz6pkqyjbo5yl.py # Topologically Sorted Source Nodes: [conv2d_9, out_12], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_9 => convolution_9 # out_12 => relu_9 # Graph fragment: # %convolution_9 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_8, %primals_20, %primals_21, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_9 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_9,), kwargs = {}) triton_poi_fused_convolution_relu_17 = async_compile.triton('triton_poi_fused_convolution_relu_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 64], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_17(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = (yindex // 512) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (512*x2) + (32768*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x2 + (64*y3)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/bz/cbzo2gj6jhtht3ai6xpbsoye3rtape6hpo2rq4zzug767jhtvlrx.py # Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_13 => getitem_6, getitem_7 # Graph fragment: # %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_18 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_18(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex % 4 x3 = (xindex // 4) y4 = yindex x5 = xindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + ((2*x2) + (16*x3) + (64*y4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x2) + (16*x3) + (64*y4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (8 + (2*x2) + (16*x3) + (64*y4)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (9 + (2*x2) + (16*x3) + (64*y4)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1, 1], 1, tl.int8) tmp9 = tl.full([1, 1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1, 1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1, 1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (y0 + (512*x5) + (8192*y1)), tmp6, xmask) tl.store(out_ptr1 + (y0 + (512*x5) + (8192*y1)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/pq/cpqwtybzwrjxjgxnzovhuhgkbi64boj6znsrze46xhxgut5r5rks.py # Topologically Sorted Source Nodes: [conv2d_10, out_14], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_10 => convolution_10 # out_14 => relu_10 # Graph fragment: # %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {}) triton_poi_fused_convolution_relu_19 = async_compile.triton('triton_poi_fused_convolution_relu_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/g6/cg64wx5bddwxgg5xvvugg3wdo2tuwcmeybxsisjz2myhpd3oii5q.py # Topologically Sorted Source Nodes: [out_17], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_17 => getitem_8, getitem_9 # Graph fragment: # %getitem_8 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 0), kwargs = {}) # %getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_20 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_20(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 2048) % 4 x1 = (xindex // 512) % 4 x6 = xindex tmp0 = (-1) + x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + x1 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-2560) + x6), tmp10, other=float("-inf")) tmp12 = x1 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-2048) + x6), tmp16, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, 
tmp11) tmp19 = 1 + x1 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + ((-1536) + x6), tmp23, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x2 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + ((-512) + x6), tmp30, other=float("-inf")) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (x6), tmp33, other=float("-inf")) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (512 + x6), tmp36, other=float("-inf")) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x2 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (1536 + x6), tmp43, other=float("-inf")) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (2048 + x6), tmp46, other=float("-inf")) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (2560 + x6), tmp49, other=float("-inf")) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 tmp57 = tl.full([1], 2, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 = tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tl.store(out_ptr0 + (x6), tmp51, None) tl.store(out_ptr1 + (x6), tmp76, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/j4/cj4skfvetxhoc7uzi7rl2fedifxp4uvrfozvckid3ugnt2vuch3n.py # Topologically Sorted Source Nodes: [conv2d_13, out_18], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_13 => convolution_13 # out_18 => relu_13 # Graph fragment: # %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_8, %primals_28, %primals_29, [1, 1], [6, 6], [6, 6], False, [0, 0], 1), kwargs = {}) # %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {}) triton_poi_fused_convolution_relu_21 = async_compile.triton('triton_poi_fused_convolution_relu_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_21', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tr/ctrhf6y6tp7beclzz7ocdp4ysczz3oyym47rdpqgsowyowvnsrd6.py # Topologically Sorted Source Nodes: [conv2d_14, conv7_feats], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d_14 => convolution_14 # conv7_feats => relu_14 # Graph fragment: # %convolution_14 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_13, %primals_30, %primals_31, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_14 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_14,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_14, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_22 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_22', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096, 16], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4096 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 1024 y1 = (yindex // 1024) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (1024*x2) + (16384*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask) tl.store(out_ptr1 + (y0 + (1024*x2) + (16384*y1)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31 = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512, ), (1, )) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512, ), (1, )) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512, ), (1, )) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512, ), (1, )) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512, ), (1, )) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512, ), (1, )) assert_size_stride(primals_28, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_29, (1024, ), (1, )) assert_size_stride(primals_30, (1024, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_31, (1024, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 192, 9, grid=grid(192, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # 
Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_4, buf2, 4096, 9, grid=grid(4096, 9), stream=stream0) del primals_4 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_8, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_8 buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_5.run(primals_10, buf5, 32768, 9, grid=grid(32768, 9), stream=stream0) del primals_10 buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(primals_12, buf6, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_12 buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_6.run(primals_14, buf7, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_14 buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_7.run(primals_16, buf8, 131072, 9, grid=grid(131072, 9), stream=stream0) del primals_16 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_18, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_18 buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_20, buf10, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_20 buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_22, buf11, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_22 buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_24, buf12, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_24 buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_8.run(primals_26, buf13, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_26 buf14 = empty_strided_cuda((1024, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_9.run(primals_28, buf14, 524288, 9, grid=grid(524288, 9), stream=stream0) del primals_28 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf15 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf16 = buf15; del buf15 # reuse # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: 
[aten.convolution, aten.relu] triton_poi_fused_convolution_relu_10.run(buf16, primals_2, 1048576, grid=grid(1048576), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf18 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_10.run(buf18, primals_5, 1048576, grid=grid(1048576), stream=stream0) del primals_5 buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf20 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_11.run(buf18, buf19, buf20, 262144, grid=grid(262144), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf22 = buf21; del buf21 # reuse # Topologically Sorted Source Nodes: [conv2d_2, out_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_12.run(buf22, primals_7, 524288, grid=grid(524288), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf24 = buf23; del buf23 # reuse # Topologically Sorted Source Nodes: [conv2d_3, out_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_12.run(buf24, primals_9, 524288, grid=grid(524288), stream=stream0) del primals_9 buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf26 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_13.run(buf24, buf25, buf26, 131072, grid=grid(131072), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf28 = buf27; del buf27 # reuse # Topologically Sorted Source Nodes: [conv2d_4, out_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_14.run(buf28, primals_11, 262144, grid=grid(262144), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf30 = buf29; del buf29 # reuse # Topologically Sorted Source Nodes: [conv2d_5, out_7], Original ATen: [aten.convolution, 
aten.relu] triton_poi_fused_convolution_relu_14.run(buf30, primals_13, 262144, grid=grid(262144), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf32 = buf31; del buf31 # reuse # Topologically Sorted Source Nodes: [conv2d_6, out_8], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_14.run(buf32, primals_15, 262144, grid=grid(262144), stream=stream0) del primals_15 buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) buf34 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_15.run(buf32, buf33, buf34, 65536, grid=grid(65536), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf35 = extern_kernels.convolution(buf33, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf36 = buf35; del buf35 # reuse # Topologically Sorted Source Nodes: [conv2d_7, out_10], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_16.run(buf36, primals_17, 131072, grid=grid(131072), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf37 = extern_kernels.convolution(buf36, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf38 = buf37; del buf37 # reuse # Topologically Sorted Source Nodes: [conv2d_8, out_11], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_16.run(buf38, primals_19, 131072, grid=grid(131072), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf39 = extern_kernels.convolution(buf38, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf39, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf40 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_9, out_12], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_17.run(buf39, primals_21, buf40, 2048, 64, grid=grid(2048, 64), stream=stream0) del buf39 del primals_21 buf41 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.float32) buf42 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.int8) # Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_18.run(buf40, buf41, buf42, 2048, 16, grid=grid(2048, 16), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf43 = extern_kernels.convolution(buf41, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf43, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf44 = buf43; del buf43 # reuse # Topologically Sorted Source Nodes: [conv2d_10, 
out_14], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_19.run(buf44, primals_23, 32768, grid=grid(32768), stream=stream0) del primals_23 # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf45 = extern_kernels.convolution(buf44, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf45, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf46 = buf45; del buf45 # reuse # Topologically Sorted Source Nodes: [conv2d_11, out_15], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_19.run(buf46, primals_25, 32768, grid=grid(32768), stream=stream0) del primals_25 # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf47 = extern_kernels.convolution(buf46, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf47, (4, 512, 4, 4), (8192, 1, 2048, 512)) buf48 = buf47; del buf47 # reuse # Topologically Sorted Source Nodes: [conv2d_12, out_16], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_19.run(buf48, primals_27, 32768, grid=grid(32768), stream=stream0) del primals_27 buf49 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.float32) buf50 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.int8) # Topologically Sorted Source Nodes: [out_17], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_20.run(buf48, buf49, buf50, 32768, grid=grid(32768), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf51 = extern_kernels.convolution(buf49, buf14, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf51, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf52 = buf51; del buf51 # reuse # Topologically Sorted Source Nodes: [conv2d_13, out_18], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_21.run(buf52, primals_29, 65536, grid=grid(65536), stream=stream0) del primals_29 # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution] buf53 = extern_kernels.convolution(buf52, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf53, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf54 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1), torch.float32) buf55 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.bool) # Topologically Sorted Source Nodes: [conv2d_14, conv7_feats], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] triton_poi_fused_convolution_relu_threshold_backward_22.run(buf53, primals_31, buf54, buf55, 4096, 16, grid=grid(4096, 16), stream=stream0) del buf53 del primals_31 return (buf40, buf54, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf14, primals_30, buf16, buf18, buf19, buf20, buf22, buf24, buf25, buf26, buf28, buf30, buf32, buf33, buf34, buf36, buf38, buf40, buf41, buf42, buf44, buf46, buf48, buf49, buf50, buf52, buf55, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = 
rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((1024, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((1024, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
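# Hedged eager-mode sketch (illustrative, not produced by the compiler) of what
# the fused `triton_poi_fused_convolution_relu_*` kernels above compute. In
# `call`, each convolution is dispatched to extern_kernels.convolution with
# bias=None, and the Triton kernel is only the epilogue: it adds the
# per-channel bias and clamps at zero in place. `_fused_conv_relu_reference`
# and its shapes are assumptions for illustration (they mirror the conv2_1
# step, buf19 -> buf22); the function is not part of the generated module.
def _fused_conv_relu_reference():
    import torch
    import torch.nn.functional as F
    x = torch.randn(4, 64, 32, 32)   # pooled activations, shaped like buf19
    w = torch.randn(128, 64, 3, 3)   # conv2_1 weight, shaped like primals_6
    b = torch.randn(128)             # conv2_1 bias, shaped like primals_7
    y_conv = F.conv2d(x, w, bias=None, padding=1)                 # extern convolution
    y_fused = torch.clamp_min(y_conv + b.view(1, -1, 1, 1), 0.0)  # Triton epilogue: bias + max(0, .)
    y_eager = F.relu(F.conv2d(x, w, bias=b, padding=1))           # eager reference
    assert torch.allclose(y_fused, y_eager, atol=1e-5)
    return y_fused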
import torch
import torchvision
from torch import nn
import torch.nn.functional as F
from itertools import product as product
import torch.optim
import torch.utils.data


def decimate(tensor, m):
    """
    Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value.
    This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size.

    :param tensor: tensor to be decimated
    :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension
    :return: decimated tensor
    """
    assert tensor.dim() == len(m)
    for d in range(tensor.dim()):
        if m[d] is not None:
            tensor = tensor.index_select(dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long())
    return tensor


class VGGBase(nn.Module):
    """
    VGG base convolutions to produce lower-level feature maps.
    """

    def __init__(self):
        super(VGGBase, self).__init__()
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
        self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
        self.load_pretrained_layers()

    def forward(self, image):
        """
        Forward propagation.

        :param image: images, a tensor of dimensions (N, 3, H, W)
        :return: lower-level feature maps conv4_3 and conv7
        """
        out = F.relu(self.conv1_1(image))
        out = F.relu(self.conv1_2(out))
        out = self.pool1(out)
        out = F.relu(self.conv2_1(out))
        out = F.relu(self.conv2_2(out))
        out = self.pool2(out)
        out = F.relu(self.conv3_1(out))
        out = F.relu(self.conv3_2(out))
        out = F.relu(self.conv3_3(out))
        out = self.pool3(out)
        out = F.relu(self.conv4_1(out))
        out = F.relu(self.conv4_2(out))
        out = F.relu(self.conv4_3(out))
        conv4_3_feats = out
        out = self.pool4(out)
        out = F.relu(self.conv5_1(out))
        out = F.relu(self.conv5_2(out))
        out = F.relu(self.conv5_3(out))
        out = self.pool5(out)
        out = F.relu(self.conv6(out))
        conv7_feats = F.relu(self.conv7(out))
        return conv4_3_feats, conv7_feats

    def load_pretrained_layers(self):
        """
        As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network.
        There's one available in PyTorch, see
        https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16
        We copy these parameters into our network. It's straightforward for conv1 to conv5.
        However, the original VGG-16 does not contain the conv6 and conv7 layers.
        Therefore, we convert fc6 and fc7 into convolutional layers and subsample by decimation. See 'decimate' above.
        """
        state_dict = self.state_dict()
        param_names = list(state_dict.keys())
        pretrained_state_dict = torchvision.models.vgg16(pretrained=True).state_dict()
        pretrained_param_names = list(pretrained_state_dict.keys())
        for i, param in enumerate(param_names[:-4]):
            state_dict[param] = pretrained_state_dict[pretrained_param_names[i]]
        conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(4096, 512, 7, 7)
        conv_fc6_bias = pretrained_state_dict['classifier.0.bias']
        state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3])
        state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4])
        conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(4096, 4096, 1, 1)
        conv_fc7_bias = pretrained_state_dict['classifier.3.bias']
        state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None])
        state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4])
        self.load_state_dict(state_dict)


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torchvision from torch import nn from itertools import product as product import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 64
    x1 = xindex // 64 % 32
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_13(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = xindex // 128 % 16
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_15(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 256
    x1 = xindex // 256 % 8
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_relu_17(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 64
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 512
    y1 = yindex // 512
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 32768 * y1), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1, 1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(out_ptr0 + (x2 + 64 * y3), tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_18(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex % 4
    x3 = xindex // 4
    y4 = yindex
    x5 = xindex
    y0 = yindex % 512
    y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (2 * x2 + 16 * x3 + 64 * y4), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x2 + 16 * x3 + 64 * y4), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + 2 * x2 + 16 * x3 + 64 * y4), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (9 + 2 * x2 + 16 * x3 + 64 * y4), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1, 1], 1, tl.int8)
    tmp9 = tl.full([1, 1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1, 1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1, 1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (y0 + 512 * x5 + 8192 * y1), tmp6, xmask)
    tl.store(out_ptr1 + (y0 + 512 * x5 + 8192 * y1), tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_20(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 2048 % 4
    x1 = xindex // 512 % 4
    x6 = xindex
    tmp0 = -1 + x2
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + x1
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (-2560 + x6), tmp10, other=float('-inf'))
    tmp12 = x1
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (-2048 + x6), tmp16, other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 1 + x1
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp5 & tmp22
    tmp24 = tl.load(in_ptr0 + (-1536 + x6), tmp23, other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = x2
    tmp27 = tmp26 >= tmp1
    tmp28 = tmp26 < tmp3
    tmp29 = tmp27 & tmp28
    tmp30 = tmp29 & tmp9
    tmp31 = tl.load(in_ptr0 + (-512 + x6), tmp30, other=float('-inf'))
    tmp32 = triton_helpers.maximum(tmp31, tmp25)
    tmp33 = tmp29 & tmp15
    tmp34 = tl.load(in_ptr0 + x6, tmp33, other=float('-inf'))
    tmp35 = triton_helpers.maximum(tmp34, tmp32)
    tmp36 = tmp29 & tmp22
    tmp37 = tl.load(in_ptr0 + (512 + x6), tmp36, other=float('-inf'))
    tmp38 = triton_helpers.maximum(tmp37, tmp35)
    tmp39 = 1 + x2
    tmp40 = tmp39 >= tmp1
    tmp41 = tmp39 < tmp3
    tmp42 = tmp40 & tmp41
    tmp43 = tmp42 & tmp9
    tmp44 = tl.load(in_ptr0 + (1536 + x6), tmp43, other=float('-inf'))
    tmp45 = triton_helpers.maximum(tmp44, tmp38)
    tmp46 = tmp42 & tmp15
    tmp47 = tl.load(in_ptr0 + (2048 + x6), tmp46, other=float('-inf'))
    tmp48 = triton_helpers.maximum(tmp47, tmp45)
    tmp49 = tmp42 & tmp22
    tmp50 = tl.load(in_ptr0 + (2560 + x6), tmp49, other=float('-inf'))
    tmp51 = triton_helpers.maximum(tmp50, tmp48)
    tmp52 = tmp17 > tmp11
    tmp53 = tl.full([1], 1, tl.int8)
    tmp54 = tl.full([1], 0, tl.int8)
    tmp55 = tl.where(tmp52, tmp53, tmp54)
    tmp56 = tmp24 > tmp18
    tmp57 = tl.full([1], 2, tl.int8)
    tmp58 = tl.where(tmp56, tmp57, tmp55)
    tmp59 = tmp31 > tmp25
    tmp60 = tl.full([1], 3, tl.int8)
    tmp61 = tl.where(tmp59, tmp60, tmp58)
    tmp62 = tmp34 > tmp32
    tmp63 = tl.full([1], 4, tl.int8)
    tmp64 = tl.where(tmp62, tmp63, tmp61)
    tmp65 = tmp37 > tmp35
    tmp66 = tl.full([1], 5, tl.int8)
    tmp67 = tl.where(tmp65, tmp66, tmp64)
    tmp68 = tmp44 > tmp38
    tmp69 = tl.full([1], 6, tl.int8)
    tmp70 = tl.where(tmp68, tmp69, tmp67)
    tmp71 = tmp47 > tmp45
    tmp72 = tl.full([1], 7, tl.int8)
    tmp73 = tl.where(tmp71, tmp72, tmp70)
    tmp74 = tmp50 > tmp48
    tmp75 = tl.full([1], 8, tl.int8)
    tmp76 = tl.where(tmp74, tmp75, tmp73)
    tl.store(out_ptr0 + x6, tmp51, None)
    tl.store(out_ptr1 + x6, tmp76, None)


@triton.jit
def triton_poi_fused_convolution_relu_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1024
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_22(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 1024
    y1 = yindex // 1024
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 1024 * x2 + 16384 * y1), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1, 1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask)
    tl.store(out_ptr1 + (y0 + 1024 * x2 + 16384 * y1), tmp6, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
     primals_13, primals_14, primals_15, primals_16, primals_17, primals_18,
     primals_19, primals_20, primals_21, primals_22, primals_23, primals_24,
     primals_25, primals_26, primals_27, primals_28, primals_29, primals_30,
     primals_31) = args
    args.clear()
    assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_7, (128,), (1,))
    assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_9, (128,), (1,))
    assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_11, (256,), (1,))
    assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_13, (256,), (1,))
    assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_15, (256,), (1,))
    assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_17, (512,), (1,))
    assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_19, (512,), (1,))
    assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_21, (512,), (1,))
    assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_23, (512,), (1,))
    assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_25, (512,), (1,))
    assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_27, (512,), (1,))
    assert_size_stride(primals_28, (1024, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_29, (1024,), (1,))
    assert_size_stride(primals_30, (1024, 1024, 1, 1), (1024, 1, 1, 1))
    assert_size_stride(primals_31, (1024,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
        triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch.float32)
        triton_poi_fused_2[grid(4096, 9)](primals_4, buf2, 4096, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
        triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
        triton_poi_fused_4[grid(16384, 9)](primals_8, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_8
        buf5 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
        triton_poi_fused_5[grid(32768, 9)](primals_10, buf5, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_10
        buf6 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        triton_poi_fused_6[grid(65536, 9)](primals_12, buf6, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_12
        buf7 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        triton_poi_fused_6[grid(65536, 9)](primals_14, buf7, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_14
        buf8 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        triton_poi_fused_7[grid(131072, 9)](primals_16, buf8, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_16
        buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_18, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_18
        buf10 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_20, buf10, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_20
        buf11 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_22, buf11, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_22
        buf12 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_24, buf12, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_24
        buf13 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        triton_poi_fused_8[grid(262144, 9)](primals_26, buf13, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_26
        buf14 = empty_strided_cuda((1024, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
        triton_poi_fused_9[grid(524288, 9)](primals_28, buf14, 524288, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_28
        buf15 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf15, (4, 64, 64, 64), (262144, 1, 4096, 64))
        buf16 = buf15
        del buf15
        triton_poi_fused_convolution_relu_10[grid(1048576)](buf16, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_2
        buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf17, (4, 64, 64, 64), (262144, 1, 4096, 64))
        buf18 = buf17
        del buf17
        triton_poi_fused_convolution_relu_10[grid(1048576)](buf18, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_5
        buf19 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32)
        buf20 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_11[grid(262144)](buf18, buf19, buf20, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf21, (4, 128, 32, 32), (131072, 1, 4096, 128))
        buf22 = buf21
        del buf21
        triton_poi_fused_convolution_relu_12[grid(524288)](buf22, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_7
        buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf23, (4, 128, 32, 32), (131072, 1, 4096, 128))
        buf24 = buf23
        del buf23
        triton_poi_fused_convolution_relu_12[grid(524288)](buf24, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
        del primals_9
        buf25 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32)
        buf26 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_13[grid(131072)](buf24, buf25, buf26, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf27, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf28 = buf27
        del buf27
        triton_poi_fused_convolution_relu_14[grid(262144)](buf28, primals_11, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_11
        buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf29, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf30 = buf29
        del buf29
        triton_poi_fused_convolution_relu_14[grid(262144)](buf30, primals_13, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_13
        buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf31, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf32 = buf31
        del buf31
        triton_poi_fused_convolution_relu_14[grid(262144)](buf32, primals_15, 262144, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_15
        buf33 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32)
        buf34 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_15[grid(65536)](buf32, buf33, buf34, 65536, XBLOCK=256, num_warps=4, num_stages=1)
        buf35 = extern_kernels.convolution(buf33, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf35, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf36 = buf35
        del buf35
        triton_poi_fused_convolution_relu_16[grid(131072)](buf36, primals_17, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_17
        buf37 = extern_kernels.convolution(buf36, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf37, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf38 = buf37
        del buf37
        triton_poi_fused_convolution_relu_16[grid(131072)](buf38, primals_19, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_19
        buf39 = extern_kernels.convolution(buf38, buf10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf39, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf40 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32)
        triton_poi_fused_convolution_relu_17[grid(2048, 64)](buf39, primals_21, buf40, 2048, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf39
        del primals_21
        buf41 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.float32)
        buf42 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_18[grid(2048, 16)](buf40, buf41, buf42, 2048, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        buf43 = extern_kernels.convolution(buf41, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf43, (4, 512, 4, 4), (8192, 1, 2048, 512))
        buf44 = buf43
        del buf43
        triton_poi_fused_convolution_relu_19[grid(32768)](buf44, primals_23, 32768, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_23
        buf45 = extern_kernels.convolution(buf44, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf45, (4, 512, 4, 4), (8192, 1, 2048, 512))
        buf46 = buf45
        del buf45
        triton_poi_fused_convolution_relu_19[grid(32768)](buf46, primals_25, 32768, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_25
        buf47 = extern_kernels.convolution(buf46, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf47, (4, 512, 4, 4), (8192, 1, 2048, 512))
        buf48 = buf47
        del buf47
        triton_poi_fused_convolution_relu_19[grid(32768)](buf48, primals_27, 32768, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_27
        buf49 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.float32)
        buf50 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.int8)
        triton_poi_fused_max_pool2d_with_indices_20[grid(32768)](buf48, buf49, buf50, 32768, XBLOCK=128, num_warps=4, num_stages=1)
        buf51 = extern_kernels.convolution(buf49, buf14, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf51, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
        buf52 = buf51
        del buf51
        triton_poi_fused_convolution_relu_21[grid(65536)](buf52, primals_29, 65536, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_29
        buf53 = extern_kernels.convolution(buf52, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf53, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
        buf54 = empty_strided_cuda((4, 1024, 4, 4), (16384, 16, 4, 1), torch.float32)
        buf55 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_22[grid(4096, 16)](buf53, primals_31, buf54, buf55, 4096, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del buf53
        del primals_31
    return (buf40, buf54, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf10, buf11, buf12, buf13, buf14, primals_30, buf16, buf18, buf19, buf20, buf22, buf24, buf25, buf26, buf28, buf30, buf32, buf33, buf34, buf36, buf38, buf40, buf41, buf42, buf44, buf46, buf48, buf49, buf50, buf52, buf55)


def decimate(tensor, m):
    """
    Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value.
    This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size.

    :param tensor: tensor to be decimated
    :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension
    :return: decimated tensor
    """
    assert tensor.dim() == len(m)
    for d in range(tensor.dim()):
        if m[d] is not None:
            tensor = tensor.index_select(dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long())
    return tensor


class VGGBaseNew(nn.Module):
    """
    VGG base convolutions to produce lower-level feature maps.
    """

    def __init__(self):
        super(VGGBaseNew, self).__init__()
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
        self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
        self.load_pretrained_layers()

    def load_pretrained_layers(self):
        """
        As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network.
        There's one available in PyTorch, see https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16
        We copy these parameters into our network. It's straightforward for conv1 to conv5.
        However, the original VGG-16 does not contain the conv6 and conv7 layers.
        Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py.
        """
        state_dict = self.state_dict()
        param_names = list(state_dict.keys())
        pretrained_state_dict = torchvision.models.vgg16(pretrained=True).state_dict()
        pretrained_param_names = list(pretrained_state_dict.keys())
        for i, param in enumerate(param_names[:-4]):
            state_dict[param] = pretrained_state_dict[pretrained_param_names[i]]
        conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(4096, 512, 7, 7)
        conv_fc6_bias = pretrained_state_dict['classifier.0.bias']
        state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3])
        state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4])
        conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(4096, 4096, 1, 1)
        conv_fc7_bias = pretrained_state_dict['classifier.3.bias']
        state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None])
        state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4])
        self.load_state_dict(state_dict)
        None

    def forward(self, input_0):
        primals_1 = self.conv1_1.weight
        primals_2 = self.conv1_1.bias
        primals_4 = self.conv1_2.weight
        primals_5 = self.conv1_2.bias
        primals_6 = self.conv2_1.weight
        primals_7 = self.conv2_1.bias
        primals_8 = self.conv2_2.weight
        primals_9 = self.conv2_2.bias
        primals_10 = self.conv3_1.weight
        primals_11 = self.conv3_1.bias
        primals_12 = self.conv3_2.weight
        primals_13 = self.conv3_2.bias
        primals_14 = self.conv3_3.weight
        primals_15 = self.conv3_3.bias
        primals_16 = self.conv4_1.weight
        primals_17 = self.conv4_1.bias
        primals_18 = self.conv4_2.weight
        primals_19 = self.conv4_2.bias
        primals_20 = self.conv4_3.weight
        primals_21 = self.conv4_3.bias
        primals_22 = self.conv5_1.weight
        primals_23 = self.conv5_1.bias
        primals_24 = self.conv5_2.weight
        primals_25 = self.conv5_2.bias
        primals_26 = self.conv5_3.weight
        primals_27 = self.conv5_3.bias
        primals_28 = self.conv6.weight
        primals_29 = self.conv6.bias
        primals_30 = self.conv7.weight
        primals_31 = self.conv7.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19,
            primals_20, primals_21, primals_22, primals_23, primals_24,
            primals_25, primals_26, primals_27, primals_28, primals_29,
            primals_30, primals_31])
        return output[0], output[1]
aarashfeizi/a-PyTorch-Tutorial-to-Object-Detection
VGGBase
false
1906
[ "MIT" ]
0
a9e1f3092d4b8c094bff5cd0897e0e3c1e0bc9c2
https://github.com/aarashfeizi/a-PyTorch-Tutorial-to-Object-Detection/tree/a9e1f3092d4b8c094bff5cd0897e0e3c1e0bc9c2
import torch
import torchvision
from torch import nn
import torch.nn.functional as F
from itertools import product as product
import torch.optim
import torch.utils.data


def decimate(tensor, m):
    """
    Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value.
    This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size.

    :param tensor: tensor to be decimated
    :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension
    :return: decimated tensor
    """
    assert tensor.dim() == len(m)
    for d in range(tensor.dim()):
        if m[d] is not None:
            tensor = tensor.index_select(dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long())
    return tensor


class Model(nn.Module):
    """
    VGG base convolutions to produce lower-level feature maps.
    """

    def __init__(self):
        super().__init__()
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
        self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
        self.load_pretrained_layers()

    def forward(self, image):
        """
        Forward propagation.

        :param image: images, a tensor of dimensions (N, 3, 300, 300)
        :return: lower-level feature maps conv4_3 and conv7
        """
        out = F.relu(self.conv1_1(image))
        out = F.relu(self.conv1_2(out))
        out = self.pool1(out)
        out = F.relu(self.conv2_1(out))
        out = F.relu(self.conv2_2(out))
        out = self.pool2(out)
        out = F.relu(self.conv3_1(out))
        out = F.relu(self.conv3_2(out))
        out = F.relu(self.conv3_3(out))
        out = self.pool3(out)
        out = F.relu(self.conv4_1(out))
        out = F.relu(self.conv4_2(out))
        out = F.relu(self.conv4_3(out))
        conv4_3_feats = out
        out = self.pool4(out)
        out = F.relu(self.conv5_1(out))
        out = F.relu(self.conv5_2(out))
        out = F.relu(self.conv5_3(out))
        out = self.pool5(out)
        out = F.relu(self.conv6(out))
        conv7_feats = F.relu(self.conv7(out))
        return conv4_3_feats, conv7_feats

    def load_pretrained_layers(self):
        """
        As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network.
        There's one available in PyTorch, see https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16
        We copy these parameters into our network. It's straightforward for conv1 to conv5.
        However, the original VGG-16 does not contain the conv6 and conv7 layers.
        Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py.
# ... truncated (>4000 chars) for memory efficiency
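
# Usage sketch (added; not part of the dataset row). It assumes the full,
# untruncated Model class from the linked repo; constructing Model() runs
# load_pretrained_layers(), which downloads torchvision's VGG-16 weights,
# so network access is assumed.
#
#     model = Model().eval()
#     image = torch.randn(1, 3, 300, 300)
#     with torch.no_grad():
#         conv4_3_feats, conv7_feats = model(image)
#     # 300 -> pool1 150 -> pool2 75 -> pool3 (ceil_mode) 38, so conv4_3_feats
#     # is (1, 512, 38, 38); pool4 gives 19, and pool5/conv6/conv7 preserve
#     # that resolution, so conv7_feats is (1, 1024, 19, 19).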
SSD300
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/2q/c2qsph7yuvd4qrjdx7qhitc2tkim3pjng4rqgufiypesenwycnhv.py # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # out => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[67108864], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 67108864 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 262144) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/se/csey4casydds7ttdva4dpczpio6jwynlr7qsuqonjcwfmq67hxyv.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_2 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16777216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = (xindex // 256) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (1024*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (1024*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (512 + (2*x0) + (1024*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (513 + (2*x0) + (1024*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) 
tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/si/csisjq7rc4algelsz22lsae4qhhrrjvjryyw5k5o6x3fdlimo55m.py # Topologically Sorted Source Nodes: [conv2d_2, out_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # out_3 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[33554432], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 33554432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 65536) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/vv/cvvcasx345h75eoxksekaeisc7iaf3bqneorw5etqpkzdja2ozs7.py # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_5 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 8388608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (512*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (512*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (256 + (2*x0) + (512*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (257 + (2*x0) + (512*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/pn/cpnor5ydof7dlspqdxdhkrhf2auj7pppdumfestnp6t2dvc7ahdp.py # Topologically Sorted Source Nodes: [conv2d_4, out_6], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_4 => convolution_4 # out_6 => relu_4 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16777216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16384) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/yg/cygiwnm4ri26idrrwplrrcwdugludlchq2iib6x7f5lgij24xv3q.py # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_9 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4194304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = (xindex // 64) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (256*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (256*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (128 + (2*x0) + (256*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (129 + (2*x0) + (256*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ro/cro7juuw5xd4di6yakssncsxdhnpfutfkymieevyezfopo5vi5f2.py # Topologically Sorted Source Nodes: [conv2d_7, out_10], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_7 => convolution_7 # out_10 => relu_7 # Graph fragment: # %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {}) triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK 
: tl.constexpr): xnumel = 8388608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/27/c27dahr6gu73agvkm5pgjug2pbakmm76uviwrqiqcnpmtijfjx7c.py # Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_13 => getitem_6, getitem_7 # Graph fragment: # %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = (xindex // 32) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 
= tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/rz/crzaczqmdz32jx3wlam76xlof7bkrj4sqcvs2mxm2pldktqwxkjt.py # Topologically Sorted Source Nodes: [conv2d_10, out_14], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_10 => convolution_10 # out_14 => relu_10 # Graph fragment: # %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {}) triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ct/cctewtzbghhtqagpkqkvir7v3nfuy5ixuei5d65icnryikadosqc.py # Topologically Sorted Source Nodes: [out_17], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # out_17 => getitem_8, getitem_9 # Graph fragment: # %getitem_8 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 0), kwargs = {}) # %getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_9 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_9', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 32) % 32 x0 = xindex % 32 x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-33) + x4), tmp10, other=float("-inf")) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-32) + x4), tmp16, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + ((-31) + x4), tmp23, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + ((-1) + x4), tmp30, other=float("-inf")) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (x4), tmp33, other=float("-inf")) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36, other=float("-inf")) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (31 + x4), tmp43, other=float("-inf")) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (32 + x4), tmp46, other=float("-inf")) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (33 + x4), tmp49, other=float("-inf")) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 tmp57 = tl.full([1], 2, tl.int8) 
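    # (explanatory note, added) The masked loads above gather a 3x3 window
    # (offsets -33..33 around x4 on a 32-wide row, with guards so taps outside
    # 0 <= x0, x1 < 32 read -inf): max pooling with kernel 3, stride 1,
    # padding 1, so the 32x32 map keeps its spatial size, matching the
    # modified "pool5" used by SSD-style backbones. The tl.where chain below
    # recovers the argmax as an int8 code 0..8 (row-major position inside the
    # window): each comparison promotes the index whenever a later tap
    # strictly exceeds the running maximum. This is the low-memory analogue of
    #   F.max_pool2d(x, kernel_size=3, stride=1, padding=1, return_indices=True)
    # except that eager return_indices yields flat offsets, not window codes.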
tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 = tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tl.store(out_ptr0 + (x4), tmp51, None) tl.store(out_ptr1 + (x4), tmp76, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/6k/c6k6gsglrybvjyfonqtp54l2icmsufqa67hpnv3btr4543ox255t.py # Topologically Sorted Source Nodes: [conv2d_13, out_18], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_13 => convolution_13 # out_18 => relu_13 # Graph fragment: # %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_8, %primals_28, %primals_29, [1, 1], [6, 6], [6, 6], False, [0, 0], 1), kwargs = {}) # %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {}) triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4194304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 1024 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/g6/cg6dnpxzqufsxykijivl4wos4pzjcbbtairqgnptitj2vdjgyiey.py # Topologically Sorted Source Nodes: [pow_1, sum_1, norm], 
Original ATen: [aten.pow, aten.sum, aten.sqrt] # Source node to ATen node mapping: # norm => sqrt # pow_1 => pow_1 # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu_9, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) triton_red_fused_pow_sqrt_sum_11 = async_compile.triton('triton_red_fused_pow_sqrt_sum_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[16384, 512], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_pow_sqrt_sum_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_pow_sqrt_sum_11(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 16384 rnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = (xindex // 4096) _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (2097152*x1)), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tmp5 = libdevice.sqrt(tmp3) tl.debug_barrier() tl.store(in_out_ptr0 + (x3), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/bn/cbnctktjgp7t3nzk7cbjdwatnjesdbubsp42k5hmnarqp4wy6aos.py # Topologically Sorted Source Nodes: [conv4_3_feats, conv4_3_feats_1], Original ATen: [aten.div, aten.mul] # Source node to ATen node mapping: # conv4_3_feats => div # conv4_3_feats_1 => mul # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu_9, %sqrt), kwargs = {}) # %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_32), kwargs = {}) triton_poi_fused_div_mul_12 = 
async_compile.triton('triton_poi_fused_div_mul_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 8388608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = (xindex // 2097152) x1 = (xindex // 4096) % 512 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x0 + (4096*x2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x3), tmp2, None) tl.store(out_ptr1 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/7e/c7eo6nf5i4jbfcbm6repz4vmeacyjdvnhnob55afz6cmr27ssfpf.py # Topologically Sorted Source Nodes: [conv2d_15, out_19], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_15 => convolution_15 # out_19 => relu_15 # Graph fragment: # %convolution_15 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_14, %primals_33, %primals_34, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_15 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_15,), kwargs = {}) triton_poi_fused_convolution_relu_13 = async_compile.triton('triton_poi_fused_convolution_relu_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1024) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/os/cosszfrjynxxkwdsxxfvdhcxozstp3jmgtlqb5zwrbcmgiswrqd3.py # Topologically Sorted Source Nodes: [conv2d_16, out_20], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_16 => convolution_16 # out_20 => relu_16 # Graph fragment: # %convolution_16 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_15, %primals_35, %primals_36, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_16 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_16,), kwargs = {}) triton_poi_fused_convolution_relu_14 = async_compile.triton('triton_poi_fused_convolution_relu_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/4q/c4qi5rxcv3r3wq6y4cvvf3g2jgztsnqzhvjd624hhs7nn3zfyrza.py # Topologically Sorted Source Nodes: [conv2d_17, out_21], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_17 => convolution_17 # out_21 => relu_17 # Graph fragment: # %convolution_17 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_16, %primals_37, %primals_38, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_17 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_17,), kwargs = {}) triton_poi_fused_convolution_relu_15 = async_compile.triton('triton_poi_fused_convolution_relu_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 256) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/nw/cnwta4czjivsbztus2tqw6ksxgwb53lhn4haikmufrci7ezow4lo.py # Topologically Sorted Source Nodes: [conv2d_18, out_22], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_18 => convolution_18 # out_22 => relu_18 # Graph fragment: # %convolution_18 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_17, %primals_39, %primals_40, [2, 
2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_18 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_18,), kwargs = {}) triton_poi_fused_convolution_relu_16 = async_compile.triton('triton_poi_fused_convolution_relu_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/dy/cdyqtsyq3zalq6uxljpp7l7awgppvbql7xysw4zlqyrrtqm73a7t.py # Topologically Sorted Source Nodes: [conv2d_19, out_23], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_19 => convolution_19 # out_23 => relu_19 # Graph fragment: # %convolution_19 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_18, %primals_41, %primals_42, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_19 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_19,), kwargs = {}) triton_poi_fused_convolution_relu_17 = async_compile.triton('triton_poi_fused_convolution_relu_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/fg/cfgcuo4oirqbbwiyditzzmzwst7ym5zfqol5vhilmjoswdttpouj.py # Topologically Sorted Source Nodes: [conv2d_20, out_24], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_20 => convolution_20 # out_24 => relu_20 # Graph fragment: # %convolution_20 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_19, %primals_43, %primals_44, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_20 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_20,), kwargs = {}) triton_poi_fused_convolution_relu_18 = async_compile.triton('triton_poi_fused_convolution_relu_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 36864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 36) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/tz/ctzm62zmq4eeli7oqdvyfsjqefvgdi2gl2schefhtdg77ra6tgac.py # Topologically Sorted Source Nodes: [conv2d_21, out_25], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_21 => convolution_21 # out_25 => relu_21 # Graph fragment: # %convolution_21 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_20, %primals_45, %primals_46, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_21 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_21,), kwargs = {}) triton_poi_fused_convolution_relu_19 = async_compile.triton('triton_poi_fused_convolution_relu_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 18432 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 36) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/ss/csswcsc3cundvg6yebux77yizbxo3zagcavuqq5eppgqt4uhsq55.py # Topologically Sorted Source Nodes: [conv2d_22, conv11_2_feats], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv11_2_feats => relu_22 # conv2d_22 => convolution_22 # Graph fragment: # 
%convolution_22 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_21, %primals_47, %primals_48, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_22 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_22,), kwargs = {}) triton_poi_fused_convolution_relu_20 = async_compile.triton('triton_poi_fused_convolution_relu_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_6/inductor_cache/cv/ccvahx445gtqwoibtu6zmqasjrfl7qfkuzhnrc4afyoqfxmjtlbc.py # Topologically Sorted Source Nodes: [locs], Original ATen: [aten.cat] # Source node to ATen node mapping: # locs => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3, %view_4, %view_5], 1), kwargs = {}) triton_poi_fused_cat_21 = async_compile.triton('triton_poi_fused_cat_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 394496 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 24656 x0 = xindex % 4 x2 = (xindex // 98624) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 16384, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4096*((x0 + (4*x1)) % 16)) + (65536*(((x0 + (4*x1) + (65536*x2)) // 65536) % 4)) + (((x0 + (4*x1)) // 16) % 4096)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + ((x0 + (4*x1)) % 16), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 22528, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr2 + ((1024*((x0 + (4*((-16384) + x1))) % 24)) + (24576*(((x0 + (4*((-16384) + x1)) + (24576*x2)) // 24576) % 4)) + (((x0 + (4*((-16384) + x1))) // 24) % 1024)), tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.load(in_ptr3 + ((x0 + (4*((-16384) + x1))) % 24), tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 24064, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr4 + ((256*((x0 + (4*((-22528) + x1))) % 24)) + (6144*(((x0 + (4*((-22528) + x1)) + (6144*x2)) // 6144) % 4)) + (((x0 + (4*((-22528) + x1))) // 24) % 256)), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tl.load(in_ptr5 + ((x0 + (4*((-22528) + x1))) % 24), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp22, tmp25, tmp26) tmp28 = tmp0 >= tmp20 tmp29 = tl.full([1], 24448, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tmp28 & tmp30 tmp32 = tl.load(in_ptr6 + ((64*((x0 + (4*((-24064) + x1))) % 24)) + (1536*(((x0 + (4*((-24064) + x1)) + (1536*x2)) // 1536) % 4)) + (((x0 + (4*((-24064) + x1))) // 24) % 64)), tmp31 & xmask, eviction_policy='evict_last', other=0.0) tmp33 = tl.load(in_ptr7 + ((x0 + (4*((-24064) + x1))) % 24), tmp31 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tmp32 + tmp33 tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp31, tmp34, tmp35) tmp37 = tmp0 >= tmp29 tmp38 = tl.full([1], 24592, tl.int64) tmp39 = tmp0 < tmp38 tmp40 = tmp37 & tmp39 
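    # (explanatory note, added) x1 indexes the 24656 concatenated prior boxes;
    # the guards split that range at 16384 / 22528 / 24064 / 24448 / 24592 /
    # 24656, i.e. the per-feature-map box counts 64*64*4, 32*32*6, 16*16*6,
    # 8*8*6, 6*6*4 and 4*4*4. Each segment's load fuses the prediction head's
    # bias add together with the NCHW -> (N, H*W*A, 4) permute/reshape, so the
    # whole concatenation is a single pass. Rough eager-mode equivalent
    # (a sketch; `loc_heads` and `feature_maps` are hypothetical names):
    #   locs = torch.cat([h(x).permute(0, 2, 3, 1).reshape(N, -1, 4)
    #                     for h, x in zip(loc_heads, feature_maps)], dim=1)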
tmp41 = tl.load(in_ptr8 + ((36*((x0 + (4*((-24448) + x1))) % 16)) + (576*(((x0 + (4*((-24448) + x1)) + (576*x2)) // 576) % 4)) + (((x0 + (4*((-24448) + x1))) // 16) % 36)), tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp42 = tl.load(in_ptr9 + ((x0 + (4*((-24448) + x1))) % 16), tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp41 + tmp42 tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype) tmp45 = tl.where(tmp40, tmp43, tmp44) tmp46 = tmp0 >= tmp38 tmp47 = tl.full([1], 24656, tl.int64) tmp48 = tmp0 < tmp47 tmp49 = tl.load(in_ptr10 + ((16*((x0 + (4*((-24592) + x1))) % 16)) + (256*(((x0 + (4*((-24592) + x1)) + (256*x2)) // 256) % 4)) + (((x0 + (4*((-24592) + x1))) // 16) % 16)), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.load(in_ptr11 + ((x0 + (4*((-24592) + x1))) % 16), tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp51 = tmp49 + tmp50 tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp46, tmp51, tmp52) tmp54 = tl.where(tmp40, tmp45, tmp53) tmp55 = tl.where(tmp31, tmp36, tmp54) tmp56 = tl.where(tmp22, tmp27, tmp55) tmp57 = tl.where(tmp13, tmp18, tmp56) tmp58 = tl.where(tmp4, tmp9, tmp57) tl.store(out_ptr0 + (x3), tmp58, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72 = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 512, 512), (786432, 262144, 512, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256, ), (1, )) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512, ), (1, )) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512, ), (1, )) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512, ), (1, )) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512, ), (1, )) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) 
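    # (explanatory note, added) assert_size_stride checks both the shape and
    # the memory strides of every parameter: the Triton kernels above
    # hard-code their index arithmetic (e.g. `x1 = (xindex // 4096) % 512`)
    # against these exact contiguous layouts, so a caller passing a
    # differently-strided tensor fails here rather than silently indexing the
    # wrong elements.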
assert_size_stride(primals_25, (512, ), (1, )) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512, ), (1, )) assert_size_stride(primals_28, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_29, (1024, ), (1, )) assert_size_stride(primals_30, (1024, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_31, (1024, ), (1, )) assert_size_stride(primals_32, (1, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_33, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_34, (256, ), (1, )) assert_size_stride(primals_35, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_36, (512, ), (1, )) assert_size_stride(primals_37, (128, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_38, (128, ), (1, )) assert_size_stride(primals_39, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_40, (256, ), (1, )) assert_size_stride(primals_41, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_42, (128, ), (1, )) assert_size_stride(primals_43, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_44, (256, ), (1, )) assert_size_stride(primals_45, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_46, (128, ), (1, )) assert_size_stride(primals_47, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_48, (256, ), (1, )) assert_size_stride(primals_49, (16, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_50, (16, ), (1, )) assert_size_stride(primals_51, (24, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_52, (24, ), (1, )) assert_size_stride(primals_53, (24, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_54, (24, ), (1, )) assert_size_stride(primals_55, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_56, (24, ), (1, )) assert_size_stride(primals_57, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_58, (16, ), (1, )) assert_size_stride(primals_59, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_60, (16, ), (1, )) assert_size_stride(primals_61, (16, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_62, (16, ), (1, )) assert_size_stride(primals_63, (24, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_64, (24, ), (1, )) assert_size_stride(primals_65, (24, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_66, (24, ), (1, )) assert_size_stride(primals_67, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_68, (24, ), (1, )) assert_size_stride(primals_69, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_70, (16, ), (1, )) assert_size_stride(primals_71, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_72, (16, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 512, 512), (16777216, 262144, 512, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 67108864, grid=grid(67108864), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 512, 512), (16777216, 262144, 512, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 67108864, grid=grid(67108864), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256, 1), torch.float32) buf5 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256, 1), torch.int8) # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 16777216, grid=grid(16777216), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 256, 256), (8388608, 65536, 256, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [conv2d_2, out_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf7, primals_7, 33554432, grid=grid(33554432), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 256, 256), (8388608, 65536, 256, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_3, out_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf9, primals_9, 33554432, grid=grid(33554432), stream=stream0) del primals_9 buf10 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128, 1), torch.int8) # Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 8388608, grid=grid(8388608), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 128, 128), (4194304, 16384, 128, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [conv2d_4, out_6], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf13, primals_11, 16777216, grid=grid(16777216), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 128, 128), (4194304, 16384, 128, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [conv2d_5, out_7], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf15, primals_13, 16777216, grid=grid(16777216), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 
1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 256, 128, 128), (4194304, 16384, 128, 1)) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [conv2d_6, out_8], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf17, primals_15, 16777216, grid=grid(16777216), stream=stream0) del primals_15 buf18 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32) buf19 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.int8) # Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf17, buf18, buf19, 4194304, grid=grid(4194304), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 512, 64, 64), (2097152, 4096, 64, 1)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [conv2d_7, out_10], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf21, primals_17, 8388608, grid=grid(8388608), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf22 = extern_kernels.convolution(buf21, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 512, 64, 64), (2097152, 4096, 64, 1)) buf23 = buf22; del buf22 # reuse # Topologically Sorted Source Nodes: [conv2d_8, out_11], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf23, primals_19, 8388608, grid=grid(8388608), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 64, 64), (2097152, 4096, 64, 1)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [conv2d_9, out_12], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_6.run(buf25, primals_21, 8388608, grid=grid(8388608), stream=stream0) del primals_21 buf26 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.float32) buf27 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.int8) # Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_7.run(buf25, buf26, buf27, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 512, 32, 32), (524288, 1024, 32, 1)) buf29 = buf28; del buf28 # reuse # Topologically Sorted Source Nodes: [conv2d_10, out_14], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf29, primals_23, 2097152, grid=grid(2097152), stream=stream0) del primals_23 # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, 
primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 512, 32, 32), (524288, 1024, 32, 1)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [conv2d_11, out_15], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf31, primals_25, 2097152, grid=grid(2097152), stream=stream0) del primals_25 # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 512, 32, 32), (524288, 1024, 32, 1)) buf33 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [conv2d_12, out_16], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_8.run(buf33, primals_27, 2097152, grid=grid(2097152), stream=stream0) del primals_27 buf34 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.float32) buf35 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.int8) # Topologically Sorted Source Nodes: [out_17], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_9.run(buf33, buf34, buf35, 2097152, grid=grid(2097152), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 1024, 32, 32), (1048576, 1024, 32, 1)) buf37 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [conv2d_13, out_18], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_10.run(buf37, primals_29, 4194304, grid=grid(4194304), stream=stream0) del primals_29 # Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 1024, 32, 32), (1048576, 1024, 32, 1)) buf39 = buf38; del buf38 # reuse # Topologically Sorted Source Nodes: [conv2d_14, conv7_feats], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_10.run(buf39, primals_31, 4194304, grid=grid(4194304), stream=stream0) del primals_31 buf40 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) buf41 = reinterpret_tensor(buf40, (4, 1, 64, 64), (4096, 4096, 64, 1), 0); del buf40 # reuse # Topologically Sorted Source Nodes: [pow_1, sum_1, norm], Original ATen: [aten.pow, aten.sum, aten.sqrt] triton_red_fused_pow_sqrt_sum_11.run(buf41, buf25, 16384, 512, grid=grid(16384), stream=stream0) buf42 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32) buf43 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [conv4_3_feats, conv4_3_feats_1], Original ATen: [aten.div, aten.mul] triton_poi_fused_div_mul_12.run(buf25, buf41, primals_32, buf42, buf43, 8388608, grid=grid(8388608), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution] buf44 = extern_kernels.convolution(buf39, primals_33, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
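        # (explanatory note, added) The buf41/buf42/buf43 block above matches
        # the L2Norm layer applied to conv4_3 in SSD-style detectors:
        # buf41[n, 0, h, w] = sqrt(sum_c buf25[n, c, h, w] ** 2),
        # buf42 = buf25 / buf41 (retained, presumably for the backward pass),
        # and buf43 = buf42 * primals_32, the learned per-channel rescale of
        # shape (1, 512, 1, 1). A minimal eager-mode sketch of the same
        # computation, with `x` standing in for the conv4_3 activation and
        # `scale` for primals_32:
        #   norm = x.pow(2).sum(dim=1, keepdim=True).sqrt()
        #   feats = (x / norm) * scale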
assert_size_stride(buf44, (4, 256, 32, 32), (262144, 1024, 32, 1)) buf45 = buf44; del buf44 # reuse # Topologically Sorted Source Nodes: [conv2d_15, out_19], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_13.run(buf45, primals_34, 1048576, grid=grid(1048576), stream=stream0) del primals_34 # Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution] buf46 = extern_kernels.convolution(buf45, primals_35, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 512, 16, 16), (131072, 256, 16, 1)) buf47 = buf46; del buf46 # reuse # Topologically Sorted Source Nodes: [conv2d_16, out_20], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_14.run(buf47, primals_36, 524288, grid=grid(524288), stream=stream0) del primals_36 # Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution] buf48 = extern_kernels.convolution(buf47, primals_37, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 128, 16, 16), (32768, 256, 16, 1)) buf49 = buf48; del buf48 # reuse # Topologically Sorted Source Nodes: [conv2d_17, out_21], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_15.run(buf49, primals_38, 131072, grid=grid(131072), stream=stream0) del primals_38 # Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution] buf50 = extern_kernels.convolution(buf49, primals_39, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 256, 8, 8), (16384, 64, 8, 1)) buf51 = buf50; del buf50 # reuse # Topologically Sorted Source Nodes: [conv2d_18, out_22], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_16.run(buf51, primals_40, 65536, grid=grid(65536), stream=stream0) del primals_40 # Topologically Sorted Source Nodes: [conv2d_19], Original ATen: [aten.convolution] buf52 = extern_kernels.convolution(buf51, primals_41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 128, 8, 8), (8192, 64, 8, 1)) buf53 = buf52; del buf52 # reuse # Topologically Sorted Source Nodes: [conv2d_19, out_23], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_17.run(buf53, primals_42, 32768, grid=grid(32768), stream=stream0) del primals_42 # Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution] buf54 = extern_kernels.convolution(buf53, primals_43, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 256, 6, 6), (9216, 36, 6, 1)) buf55 = buf54; del buf54 # reuse # Topologically Sorted Source Nodes: [conv2d_20, out_24], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_18.run(buf55, primals_44, 36864, grid=grid(36864), stream=stream0) del primals_44 # Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution] buf56 = extern_kernels.convolution(buf55, primals_45, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 128, 6, 6), (4608, 36, 6, 1)) buf57 = buf56; del buf56 # reuse # Topologically Sorted Source Nodes: [conv2d_21, out_25], Original ATen: 
[aten.convolution, aten.relu] triton_poi_fused_convolution_relu_19.run(buf57, primals_46, 18432, grid=grid(18432), stream=stream0) del primals_46 # Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution] buf58 = extern_kernels.convolution(buf57, primals_47, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf58, (4, 256, 4, 4), (4096, 16, 4, 1)) buf59 = buf58; del buf58 # reuse # Topologically Sorted Source Nodes: [conv2d_22, conv11_2_feats], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_20.run(buf59, primals_48, 16384, grid=grid(16384), stream=stream0) del primals_48 # Topologically Sorted Source Nodes: [l_conv4_3], Original ATen: [aten.convolution] buf60 = extern_kernels.convolution(buf43, primals_49, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf60, (4, 16, 64, 64), (65536, 4096, 64, 1)) # Topologically Sorted Source Nodes: [l_conv7], Original ATen: [aten.convolution] buf61 = extern_kernels.convolution(buf39, primals_51, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf61, (4, 24, 32, 32), (24576, 1024, 32, 1)) # Topologically Sorted Source Nodes: [l_conv8_2], Original ATen: [aten.convolution] buf62 = extern_kernels.convolution(buf47, primals_53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf62, (4, 24, 16, 16), (6144, 256, 16, 1)) # Topologically Sorted Source Nodes: [l_conv9_2], Original ATen: [aten.convolution] buf63 = extern_kernels.convolution(buf51, primals_55, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf63, (4, 24, 8, 8), (1536, 64, 8, 1)) # Topologically Sorted Source Nodes: [l_conv10_2], Original ATen: [aten.convolution] buf64 = extern_kernels.convolution(buf55, primals_57, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 16, 6, 6), (576, 36, 6, 1)) # Topologically Sorted Source Nodes: [l_conv11_2], Original ATen: [aten.convolution] buf65 = extern_kernels.convolution(buf59, primals_59, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf65, (4, 16, 4, 4), (256, 16, 4, 1)) # Topologically Sorted Source Nodes: [c_conv4_3], Original ATen: [aten.convolution] buf66 = extern_kernels.convolution(buf43, primals_61, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 16, 64, 64), (65536, 4096, 64, 1)) # Topologically Sorted Source Nodes: [c_conv7], Original ATen: [aten.convolution] buf67 = extern_kernels.convolution(buf39, primals_63, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 24, 32, 32), (24576, 1024, 32, 1)) # Topologically Sorted Source Nodes: [c_conv8_2], Original ATen: [aten.convolution] buf68 = extern_kernels.convolution(buf47, primals_65, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf68, (4, 24, 16, 16), (6144, 256, 16, 1)) # Topologically Sorted Source Nodes: [c_conv9_2], 
Original ATen: [aten.convolution] buf69 = extern_kernels.convolution(buf51, primals_67, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf69, (4, 24, 8, 8), (1536, 64, 8, 1)) # Topologically Sorted Source Nodes: [c_conv10_2], Original ATen: [aten.convolution] buf70 = extern_kernels.convolution(buf55, primals_69, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf70, (4, 16, 6, 6), (576, 36, 6, 1)) # Topologically Sorted Source Nodes: [c_conv11_2], Original ATen: [aten.convolution] buf71 = extern_kernels.convolution(buf59, primals_71, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf71, (4, 16, 4, 4), (256, 16, 4, 1)) buf72 = empty_strided_cuda((4, 24656, 4), (98624, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [locs], Original ATen: [aten.cat] triton_poi_fused_cat_21.run(buf60, primals_50, buf61, primals_52, buf62, primals_54, buf63, primals_56, buf64, primals_58, buf65, primals_60, buf72, 394496, grid=grid(394496), stream=stream0) del buf60 del buf61 del buf62 del buf63 del buf64 del buf65 del primals_50 del primals_52 del primals_54 del primals_56 del primals_58 del primals_60 buf73 = empty_strided_cuda((4, 24656, 4), (98624, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [classes_scores], Original ATen: [aten.cat] triton_poi_fused_cat_21.run(buf66, primals_62, buf67, primals_64, buf68, primals_66, buf69, primals_68, buf70, primals_70, buf71, primals_72, buf73, 394496, grid=grid(394496), stream=stream0) del buf66 del buf67 del buf68 del buf69 del buf70 del buf71 del primals_62 del primals_64 del primals_66 del primals_68 del primals_70 del primals_72 return (buf72, buf73, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_33, primals_35, primals_37, primals_39, primals_41, primals_43, primals_45, primals_47, primals_49, primals_51, primals_53, primals_55, primals_57, primals_59, primals_61, primals_63, primals_65, primals_67, primals_69, primals_71, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf25, buf26, buf27, buf29, buf31, buf33, buf34, buf35, buf37, buf39, buf41, buf42, buf43, buf45, buf47, buf49, buf51, buf53, buf55, buf57, buf59, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 512, 512), (786432, 262144, 512, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 128, 3, 
3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((1024, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((1024, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((1, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((128, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_48 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_49 = rand_strided((16, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', 
dtype=torch.float32) primals_50 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_51 = rand_strided((24, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_52 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_53 = rand_strided((24, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_54 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_55 = rand_strided((24, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_56 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_57 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_58 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_59 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_60 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_61 = rand_strided((16, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_62 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_63 = rand_strided((24, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_64 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_65 = rand_strided((24, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_66 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_67 = rand_strided((24, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_68 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32) primals_69 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_70 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_71 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_72 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torchvision from torch import nn import torch.nn.functional as F from math import sqrt from itertools import product as product import torch.optim import torch.utils.data def decimate(tensor, m): """ Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value. This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size. :param tensor: tensor to be decimated :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension :return: decimated tensor """ assert tensor.dim() == len(m) for d in range(tensor.dim()): if m[d] is not None: tensor = tensor.index_select(dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long()) return tensor def cxcy_to_xy(cxcy): """ Convert bounding boxes from center-size coordinates (c_x, c_y, w, h) to boundary coordinates (x_min, y_min, x_max, y_max). :param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4) :return: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4) """ return torch.cat([cxcy[:, :2] - cxcy[:, 2:] / 2, cxcy[:, :2] + cxcy[:, 2:] / 2], 1) def find_intersection(set_1, set_2): """ Find the intersection of every box combination between two sets of boxes that are in boundary coordinates. :param set_1: set 1, a tensor of dimensions (n1, 4) :param set_2: set 2, a tensor of dimensions (n2, 4) :return: intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2) """ lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2]. unsqueeze(0)) upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:]. unsqueeze(0)) intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] def find_jaccard_overlap(set_1, set_2): """ Find the Jaccard Overlap (IoU) of every box combination between two sets of boxes that are in boundary coordinates. :param set_1: set 1, a tensor of dimensions (n1, 4) :param set_2: set 2, a tensor of dimensions (n2, 4) :return: Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2) """ intersection = find_intersection(set_1, set_2) areas_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1]) areas_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1]) union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection return intersection / union def gcxgcy_to_cxcy(gcxgcy, priors_cxcy): """ Decode bounding box coordinates predicted by the model, since they are encoded in the form mentioned above. They are decoded into center-size coordinates. This is the inverse of the function above. :param gcxgcy: encoded bounding boxes, i.e. output of the model, a tensor of size (n_priors, 4) :param priors_cxcy: prior boxes with respect to which the encoding is defined, a tensor of size (n_priors, 4) :return: decoded bounding boxes in center-size form, a tensor of size (n_priors, 4) """ return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy [:, :2], torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1) class VGGBase(nn.Module): """ VGG base convolutions to produce lower-level feature maps. 
""" def __init__(self): super(VGGBase, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1) self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1) self.load_pretrained_layers() def forward(self, image): """ Forward propagation. :param image: images, a tensor of dimensions (N, 3, 300, 300) :return: lower-level feature maps conv4_3 and conv7 """ out = F.relu(self.conv1_1(image)) out = F.relu(self.conv1_2(out)) out = self.pool1(out) out = F.relu(self.conv2_1(out)) out = F.relu(self.conv2_2(out)) out = self.pool2(out) out = F.relu(self.conv3_1(out)) out = F.relu(self.conv3_2(out)) out = F.relu(self.conv3_3(out)) out = self.pool3(out) out = F.relu(self.conv4_1(out)) out = F.relu(self.conv4_2(out)) out = F.relu(self.conv4_3(out)) conv4_3_feats = out out = self.pool4(out) out = F.relu(self.conv5_1(out)) out = F.relu(self.conv5_2(out)) out = F.relu(self.conv5_3(out)) out = self.pool5(out) out = F.relu(self.conv6(out)) conv7_feats = F.relu(self.conv7(out)) return conv4_3_feats, conv7_feats def load_pretrained_layers(self): """ As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network. There's one available in PyTorch, see https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16 We copy these parameters into our network. It's straightforward for conv1 to conv5. However, the original VGG-16 does not contain the conv6 and con7 layers. Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py. 
""" state_dict = self.state_dict() param_names = list(state_dict.keys()) pretrained_state_dict = torchvision.models.vgg16(pretrained=True ).state_dict() pretrained_param_names = list(pretrained_state_dict.keys()) for i, param in enumerate(param_names[:-4]): state_dict[param] = pretrained_state_dict[pretrained_param_names[i] ] conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view( 4096, 512, 7, 7) conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view( 4096, 4096, 1, 1) conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) self.load_state_dict(state_dict) None class AuxiliaryConvolutions(nn.Module): """ Additional convolutions to produce higher-level feature maps. """ def __init__(self): super(AuxiliaryConvolutions, self).__init__() self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0) self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0) self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. """ for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, conv7_feats): """ Forward propagation. :param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19) :return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2 """ out = F.relu(self.conv8_1(conv7_feats)) out = F.relu(self.conv8_2(out)) conv8_2_feats = out out = F.relu(self.conv9_1(out)) out = F.relu(self.conv9_2(out)) conv9_2_feats = out out = F.relu(self.conv10_1(out)) out = F.relu(self.conv10_2(out)) conv10_2_feats = out out = F.relu(self.conv11_1(out)) conv11_2_feats = F.relu(self.conv11_2(out)) return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats class PredictionConvolutions(nn.Module): """ Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps. The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 8732 prior (default) boxes. See 'cxcy_to_gcxgcy' in utils.py for the encoding definition. The class scores represent the scores of each object class in each of the 8732 bounding boxes located. A high score for 'background' = no object. 
""" def __init__(self, n_classes): """ :param n_classes: number of different types of objects """ super(PredictionConvolutions, self).__init__() self.n_classes = n_classes n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6, 'conv10_2': 4, 'conv11_2': 4} self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1) self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size= 3, padding=1) self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1) self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1) self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1) self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1) self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes, kernel_size=3, padding=1) self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1) self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. """ for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats): """ Forward propagation. :param conv4_3_feats: conv4_3 feature map, a tensor of dimensions (N, 512, 38, 38) :param conv7_feats: conv7 feature map, a tensor of dimensions (N, 1024, 19, 19) :param conv8_2_feats: conv8_2 feature map, a tensor of dimensions (N, 512, 10, 10) :param conv9_2_feats: conv9_2 feature map, a tensor of dimensions (N, 256, 5, 5) :param conv10_2_feats: conv10_2 feature map, a tensor of dimensions (N, 256, 3, 3) :param conv11_2_feats: conv11_2 feature map, a tensor of dimensions (N, 256, 1, 1) :return: 8732 locations and class scores (i.e. 
w.r.t each prior box) for each image """ batch_size = conv4_3_feats.size(0) l_conv4_3 = self.loc_conv4_3(conv4_3_feats) l_conv4_3 = l_conv4_3.permute(0, 2, 3, 1).contiguous() l_conv4_3 = l_conv4_3.view(batch_size, -1, 4) l_conv7 = self.loc_conv7(conv7_feats) l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous() l_conv7 = l_conv7.view(batch_size, -1, 4) l_conv8_2 = self.loc_conv8_2(conv8_2_feats) l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous() l_conv8_2 = l_conv8_2.view(batch_size, -1, 4) l_conv9_2 = self.loc_conv9_2(conv9_2_feats) l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous() l_conv9_2 = l_conv9_2.view(batch_size, -1, 4) l_conv10_2 = self.loc_conv10_2(conv10_2_feats) l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous() l_conv10_2 = l_conv10_2.view(batch_size, -1, 4) l_conv11_2 = self.loc_conv11_2(conv11_2_feats) l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous() l_conv11_2 = l_conv11_2.view(batch_size, -1, 4) c_conv4_3 = self.cl_conv4_3(conv4_3_feats) c_conv4_3 = c_conv4_3.permute(0, 2, 3, 1).contiguous() c_conv4_3 = c_conv4_3.view(batch_size, -1, self.n_classes) c_conv7 = self.cl_conv7(conv7_feats) c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous() c_conv7 = c_conv7.view(batch_size, -1, self.n_classes) c_conv8_2 = self.cl_conv8_2(conv8_2_feats) c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous() c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes) c_conv9_2 = self.cl_conv9_2(conv9_2_feats) c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous() c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes) c_conv10_2 = self.cl_conv10_2(conv10_2_feats) c_conv10_2 = c_conv10_2.permute(0, 2, 3, 1).contiguous() c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes) c_conv11_2 = self.cl_conv11_2(conv11_2_feats) c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous() c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes) locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2, l_conv10_2, l_conv11_2], dim=1) classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2, c_conv9_2, c_conv10_2, c_conv11_2], dim=1) return locs, classes_scores class SSD300(nn.Module): """ The SSD300 network - encapsulates the base VGG network, auxiliary, and prediction convolutions. """ def __init__(self, n_classes): super(SSD300, self).__init__() self.n_classes = n_classes self.base = VGGBase() self.aux_convs = AuxiliaryConvolutions() self.pred_convs = PredictionConvolutions(n_classes) self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1)) nn.init.constant_(self.rescale_factors, 20) self.priors_cxcy = self.create_prior_boxes() def forward(self, image): """ Forward propagation. :param image: images, a tensor of dimensions (N, 3, 300, 300) :return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image """ conv4_3_feats, conv7_feats = self.base(image) norm = conv4_3_feats.pow(2).sum(dim=1, keepdim=True).sqrt() conv4_3_feats = conv4_3_feats / norm conv4_3_feats = conv4_3_feats * self.rescale_factors conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats = (self .aux_convs(conv7_feats)) locs, classes_scores = self.pred_convs(conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats) return locs, classes_scores def create_prior_boxes(self): """ Create the 8732 prior (default) boxes for the SSD300, as defined in the paper. 
:return: prior boxes in center-size coordinates, a tensor of dimensions (8732, 4) """ fmap_dims = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10, 'conv9_2': 5, 'conv10_2': 3, 'conv11_2': 1} obj_scales = {'conv4_3': 0.1, 'conv7': 0.2, 'conv8_2': 0.375, 'conv9_2': 0.55, 'conv10_2': 0.725, 'conv11_2': 0.9} aspect_ratios = {'conv4_3': [2.0, 4.0, 0.25], 'conv7': [2.0, 4.0, 3.0, 0.25, 0.333], 'conv8_2': [2.0, 4.0, 3.0, 0.25, 0.333], 'conv9_2': [2.0, 4.0, 3.0, 0.25, 0.333], 'conv10_2': [2.0, 4.0, 0.25], 'conv11_2': [2.0, 4.0, 0.25]} fmaps = list(fmap_dims.keys()) prior_boxes = [] for k, fmap in enumerate(fmaps): for i in range(fmap_dims[fmap]): for j in range(fmap_dims[fmap]): cx = (j + 0.5) / fmap_dims[fmap] cy = (i + 0.5) / fmap_dims[fmap] for ratio in aspect_ratios[fmap]: prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt (ratio), obj_scales[fmap] / sqrt(ratio)]) if ratio == 2.0: try: additional_scale = sqrt(obj_scales[fmap] * obj_scales[fmaps[k + 1]]) except IndexError: additional_scale = 1.0 prior_boxes.append([cx, cy, additional_scale, additional_scale]) prior_boxes = torch.FloatTensor(prior_boxes) prior_boxes.clamp_(0, 1) return prior_boxes def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k): """ Decipher the 8732 locations and class scores (output of ths SSD300) to detect objects. For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold. :param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4) :param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes) :param min_score: minimum threshold for a box to be considered a match for a certain class :param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS :param top_k: if there are a lot of resulting detection across all classes, keep only the top 'k' :return: detections (boxes, labels, and scores), lists of length batch_size """ batch_size = predicted_locs.size(0) n_priors = self.priors_cxcy.size(0) predicted_scores = F.softmax(predicted_scores, dim=2) all_images_boxes = list() all_images_labels = list() all_images_scores = list() assert n_priors == predicted_locs.size(1) == predicted_scores.size(1) for i in range(batch_size): decoded_locs = cxcy_to_xy(gcxgcy_to_cxcy(predicted_locs[i], self.priors_cxcy)) image_boxes = list() image_labels = list() image_scores = list() _max_scores, _best_label = predicted_scores[i].max(dim=1) for c in range(1, self.n_classes): class_scores = predicted_scores[i][:, c] score_above_min_score = class_scores > min_score n_above_min_score = score_above_min_score.sum().item() if n_above_min_score == 0: continue class_scores = class_scores[score_above_min_score] class_decoded_locs = decoded_locs[score_above_min_score] class_scores, sort_ind = class_scores.sort(dim=0, descending=True) class_decoded_locs = class_decoded_locs[sort_ind] overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs) suppress = torch.zeros(n_above_min_score, dtype=torch.uint8) for box in range(class_decoded_locs.size(0)): if suppress[box] == 1: continue suppress = torch.max(suppress, overlap[box] > max_overlap) suppress[box] = 0 image_boxes.append(class_decoded_locs[1 - suppress]) image_labels.append(torch.LongTensor((1 - suppress).sum(). 
item() * [c])) image_scores.append(class_scores[1 - suppress]) if len(image_boxes) == 0: image_boxes.append(torch.FloatTensor([[0.0, 0.0, 1.0, 1.0]])) image_labels.append(torch.LongTensor([0])) image_scores.append(torch.FloatTensor([0.0])) image_boxes = torch.cat(image_boxes, dim=0) image_labels = torch.cat(image_labels, dim=0) image_scores = torch.cat(image_scores, dim=0) n_objects = image_scores.size(0) if n_objects > top_k: image_scores, sort_ind = image_scores.sort(dim=0, descending=True) image_scores = image_scores[:top_k] image_boxes = image_boxes[sort_ind][:top_k] image_labels = image_labels[sort_ind][:top_k] all_images_boxes.append(image_boxes) all_images_labels.append(image_labels) all_images_scores.append(image_scores) return all_images_boxes, all_images_labels, all_images_scores def get_inputs(): return [torch.rand([4, 3, 512, 512])] def get_init_inputs(): return [[], {'n_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torchvision from torch import nn import torch.nn.functional as F from math import sqrt from itertools import product as product import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 262144 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 256 x1 = xindex // 256 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 1024 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 1024 * x1), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (512 + 2 * x0 + 1024 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (513 + 2 * x0 + 1024 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 65536 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 512 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 512 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (256 + 2 * x0 + 512 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (257 + 2 * x0 + 512 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = 
triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16384 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 256 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 256 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (128 + 2 * x0 + 256 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (129 + 2 * x0 + 256 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, 
tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 32 % 32 x0 = xindex % 32 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-33 + x4), tmp10, other=float('-inf')) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-32 + x4), tmp16, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-31 + x4), tmp23, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + x4, tmp33, other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36, other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (31 + x4), tmp43, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (32 + x4), tmp46, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (33 + x4), tmp49, other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 tmp57 = tl.full([1], 2, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 = tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tl.store(out_ptr0 + x4, tmp51, None) tl.store(out_ptr1 + x4, tmp76, None) @triton.jit def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] 
tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_red_fused_pow_sqrt_sum_11(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = xindex // 4096 _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 2097152 * x1), rmask, eviction_policy='evict_first', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tmp5 = libdevice.sqrt(tmp3) tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp5, None) @triton.jit def triton_poi_fused_div_mul_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 2097152 x1 = xindex // 4096 % 512 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x3, tmp2, None) tl.store(out_ptr1 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = 
tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 36 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 36 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 16 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_cat_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 394496 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 24656 x0 = xindex % 4 x2 = xindex // 98624 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 16384, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4096 * ((x0 + 4 * x1) % 16) + 65536 * ((x0 + 4 * x1 + 65536 * x2) // 65536 % 4) + (x0 + 4 * x1) // 16 % 4096), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0 + 4 * x1) % 16, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 22528, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tmp10 & tmp12 tmp14 = tl.load(in_ptr2 + (1024 * ((x0 + 4 * (-16384 + x1)) % 24) + 24576 * ((x0 + 4 * (-16384 + x1) + 24576 * x2) // 24576 % 4) + (x0 + 4 * (-16384 + x1)) // 24 % 1024), tmp13 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.load(in_ptr3 + (x0 + 4 * (-16384 + x1)) % 24, tmp13 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp14 + tmp15 tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = 
tl.where(tmp13, tmp16, tmp17) tmp19 = tmp0 >= tmp11 tmp20 = tl.full([1], 24064, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp23 = tl.load(in_ptr4 + (256 * ((x0 + 4 * (-22528 + x1)) % 24) + 6144 * ((x0 + 4 * (-22528 + x1) + 6144 * x2) // 6144 % 4) + (x0 + 4 * (- 22528 + x1)) // 24 % 256), tmp22 & xmask, eviction_policy= 'evict_last', other=0.0) tmp24 = tl.load(in_ptr5 + (x0 + 4 * (-22528 + x1)) % 24, tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp25 = tmp23 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp22, tmp25, tmp26) tmp28 = tmp0 >= tmp20 tmp29 = tl.full([1], 24448, tl.int64) tmp30 = tmp0 < tmp29 tmp31 = tmp28 & tmp30 tmp32 = tl.load(in_ptr6 + (64 * ((x0 + 4 * (-24064 + x1)) % 24) + 1536 * ((x0 + 4 * (-24064 + x1) + 1536 * x2) // 1536 % 4) + (x0 + 4 * (- 24064 + x1)) // 24 % 64), tmp31 & xmask, eviction_policy= 'evict_last', other=0.0) tmp33 = tl.load(in_ptr7 + (x0 + 4 * (-24064 + x1)) % 24, tmp31 & xmask, eviction_policy='evict_last', other=0.0) tmp34 = tmp32 + tmp33 tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype) tmp36 = tl.where(tmp31, tmp34, tmp35) tmp37 = tmp0 >= tmp29 tmp38 = tl.full([1], 24592, tl.int64) tmp39 = tmp0 < tmp38 tmp40 = tmp37 & tmp39 tmp41 = tl.load(in_ptr8 + (36 * ((x0 + 4 * (-24448 + x1)) % 16) + 576 * ((x0 + 4 * (-24448 + x1) + 576 * x2) // 576 % 4) + (x0 + 4 * (- 24448 + x1)) // 16 % 36), tmp40 & xmask, eviction_policy= 'evict_last', other=0.0) tmp42 = tl.load(in_ptr9 + (x0 + 4 * (-24448 + x1)) % 16, tmp40 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp41 + tmp42 tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype) tmp45 = tl.where(tmp40, tmp43, tmp44) tmp46 = tmp0 >= tmp38 tl.full([1], 24656, tl.int64) tmp49 = tl.load(in_ptr10 + (16 * ((x0 + 4 * (-24592 + x1)) % 16) + 256 * ((x0 + 4 * (-24592 + x1) + 256 * x2) // 256 % 4) + (x0 + 4 * (- 24592 + x1)) // 16 % 16), tmp46 & xmask, eviction_policy= 'evict_last', other=0.0) tmp50 = tl.load(in_ptr11 + (x0 + 4 * (-24592 + x1)) % 16, tmp46 & xmask, eviction_policy='evict_last', other=0.0) tmp51 = tmp49 + tmp50 tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype) tmp53 = tl.where(tmp46, tmp51, tmp52) tmp54 = tl.where(tmp40, tmp45, tmp53) tmp55 = tl.where(tmp31, tmp36, tmp54) tmp56 = tl.where(tmp22, tmp27, tmp55) tmp57 = tl.where(tmp13, tmp18, tmp56) tmp58 = tl.where(tmp4, tmp9, tmp57) tl.store(out_ptr0 + x3, tmp58, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 512, 512), (786432, 262144, 512, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), 
(1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (256,), (1,)) assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (512,), (1,)) assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_21, (512,), (1,)) assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_27, (512,), (1,)) assert_size_stride(primals_28, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_29, (1024,), (1,)) assert_size_stride(primals_30, (1024, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_31, (1024,), (1,)) assert_size_stride(primals_32, (1, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_33, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_34, (256,), (1,)) assert_size_stride(primals_35, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_36, (512,), (1,)) assert_size_stride(primals_37, (128, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_38, (128,), (1,)) assert_size_stride(primals_39, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_40, (256,), (1,)) assert_size_stride(primals_41, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_42, (128,), (1,)) assert_size_stride(primals_43, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_44, (256,), (1,)) assert_size_stride(primals_45, (128, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_46, (128,), (1,)) assert_size_stride(primals_47, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_48, (256,), (1,)) assert_size_stride(primals_49, (16, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_50, (16,), (1,)) assert_size_stride(primals_51, (24, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_52, (24,), (1,)) assert_size_stride(primals_53, (24, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_54, (24,), (1,)) assert_size_stride(primals_55, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_56, (24,), (1,)) assert_size_stride(primals_57, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_58, (16,), (1,)) assert_size_stride(primals_59, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_60, (16,), (1,)) assert_size_stride(primals_61, (16, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_62, (16,), (1,)) assert_size_stride(primals_63, (24, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_64, (24,), (1,)) assert_size_stride(primals_65, (24, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_66, (24,), (1,)) assert_size_stride(primals_67, (24, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_68, (24,), (1,)) assert_size_stride(primals_69, (16, 256, 3, 3), (2304, 9, 3, 1)) 
assert_size_stride(primals_70, (16,), (1,)) assert_size_stride(primals_71, (16, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_72, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 512, 512), (16777216, 262144, 512, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(67108864)](buf1, primals_2, 67108864, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 512, 512), (16777216, 262144, 512, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_0[grid(67108864)](buf3, primals_5, 67108864, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256, 1), torch.float32) buf5 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(16777216)](buf3, buf4, buf5, 16777216, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 256, 256), (8388608, 65536, 256, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_2[grid(33554432)](buf7, primals_7, 33554432, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 256, 256), (8388608, 65536, 256, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_2[grid(33554432)](buf9, primals_9, 33554432, XBLOCK=512, num_warps=8, num_stages=1) del primals_9 buf10 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(8388608)](buf9, buf10, buf11, 8388608, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 128, 128), (4194304, 16384, 128, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_4[grid(16777216)](buf13, primals_11, 16777216, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 128, 128), (4194304, 16384, 128, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_relu_4[grid(16777216)](buf15, primals_13, 16777216, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 256, 128, 128), (4194304, 16384, 128, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_relu_4[grid(16777216)](buf17, primals_15, 16777216, XBLOCK=512, num_warps=8, num_stages=1) del primals_15 
buf18 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32) buf19 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(4194304)](buf17, buf18, buf19, 4194304, XBLOCK=512, num_warps=8, num_stages=1) buf20 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 512, 64, 64), (2097152, 4096, 64, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_relu_6[grid(8388608)](buf21, primals_17, 8388608, XBLOCK=512, num_warps=8, num_stages=1) del primals_17 buf22 = extern_kernels.convolution(buf21, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 512, 64, 64), (2097152, 4096, 64, 1)) buf23 = buf22 del buf22 triton_poi_fused_convolution_relu_6[grid(8388608)](buf23, primals_19, 8388608, XBLOCK=512, num_warps=8, num_stages=1) del primals_19 buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 512, 64, 64), (2097152, 4096, 64, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_6[grid(8388608)](buf25, primals_21, 8388608, XBLOCK=512, num_warps=8, num_stages=1) del primals_21 buf26 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.float32) buf27 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_7[grid(2097152)](buf25, buf26, buf27, 2097152, XBLOCK=512, num_warps=8, num_stages=1) buf28 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 512, 32, 32), (524288, 1024, 32, 1)) buf29 = buf28 del buf28 triton_poi_fused_convolution_relu_8[grid(2097152)](buf29, primals_23, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) del primals_23 buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 512, 32, 32), (524288, 1024, 32, 1)) buf31 = buf30 del buf30 triton_poi_fused_convolution_relu_8[grid(2097152)](buf31, primals_25, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) del primals_25 buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 512, 32, 32), (524288, 1024, 32, 1)) buf33 = buf32 del buf32 triton_poi_fused_convolution_relu_8[grid(2097152)](buf33, primals_27, 2097152, XBLOCK=1024, num_warps=4, num_stages=1) del primals_27 buf34 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.float32) buf35 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_9[grid(2097152)](buf33, buf34, buf35, 2097152, XBLOCK=512, num_warps=8, num_stages=1) buf36 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 1024, 32, 32), (1048576, 1024, 32, 1)) buf37 = buf36 del buf36 triton_poi_fused_convolution_relu_10[grid(4194304)](buf37, primals_29, 4194304, 
XBLOCK=1024, num_warps=4, num_stages=1) del primals_29 buf38 = extern_kernels.convolution(buf37, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 1024, 32, 32), (1048576, 1024, 32, 1)) buf39 = buf38 del buf38 triton_poi_fused_convolution_relu_10[grid(4194304)](buf39, primals_31, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_31 buf40 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) buf41 = reinterpret_tensor(buf40, (4, 1, 64, 64), (4096, 4096, 64, 1), 0) del buf40 triton_red_fused_pow_sqrt_sum_11[grid(16384)](buf41, buf25, 16384, 512, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1) buf42 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32) buf43 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32) triton_poi_fused_div_mul_12[grid(8388608)](buf25, buf41, primals_32, buf42, buf43, 8388608, XBLOCK=512, num_warps=8, num_stages=1) buf44 = extern_kernels.convolution(buf39, primals_33, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 256, 32, 32), (262144, 1024, 32, 1)) buf45 = buf44 del buf44 triton_poi_fused_convolution_relu_13[grid(1048576)](buf45, primals_34, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_34 buf46 = extern_kernels.convolution(buf45, primals_35, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 512, 16, 16), (131072, 256, 16, 1)) buf47 = buf46 del buf46 triton_poi_fused_convolution_relu_14[grid(524288)](buf47, primals_36, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_36 buf48 = extern_kernels.convolution(buf47, primals_37, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 128, 16, 16), (32768, 256, 16, 1)) buf49 = buf48 del buf48 triton_poi_fused_convolution_relu_15[grid(131072)](buf49, primals_38, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_38 buf50 = extern_kernels.convolution(buf49, primals_39, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 256, 8, 8), (16384, 64, 8, 1)) buf51 = buf50 del buf50 triton_poi_fused_convolution_relu_16[grid(65536)](buf51, primals_40, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_40 buf52 = extern_kernels.convolution(buf51, primals_41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 128, 8, 8), (8192, 64, 8, 1)) buf53 = buf52 del buf52 triton_poi_fused_convolution_relu_17[grid(32768)](buf53, primals_42, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_42 buf54 = extern_kernels.convolution(buf53, primals_43, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf54, (4, 256, 6, 6), (9216, 36, 6, 1)) buf55 = buf54 del buf54 triton_poi_fused_convolution_relu_18[grid(36864)](buf55, primals_44, 36864, XBLOCK=256, num_warps=4, num_stages=1) del primals_44 buf56 = extern_kernels.convolution(buf55, primals_45, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf56, (4, 128, 6, 6), (4608, 36, 6, 1)) 
buf57 = buf56 del buf56 triton_poi_fused_convolution_relu_19[grid(18432)](buf57, primals_46, 18432, XBLOCK=256, num_warps=4, num_stages=1) del primals_46 buf58 = extern_kernels.convolution(buf57, primals_47, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf58, (4, 256, 4, 4), (4096, 16, 4, 1)) buf59 = buf58 del buf58 triton_poi_fused_convolution_relu_20[grid(16384)](buf59, primals_48, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_48 buf60 = extern_kernels.convolution(buf43, primals_49, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf60, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf61 = extern_kernels.convolution(buf39, primals_51, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf61, (4, 24, 32, 32), (24576, 1024, 32, 1)) buf62 = extern_kernels.convolution(buf47, primals_53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf62, (4, 24, 16, 16), (6144, 256, 16, 1)) buf63 = extern_kernels.convolution(buf51, primals_55, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf63, (4, 24, 8, 8), (1536, 64, 8, 1)) buf64 = extern_kernels.convolution(buf55, primals_57, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 16, 6, 6), (576, 36, 6, 1)) buf65 = extern_kernels.convolution(buf59, primals_59, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf65, (4, 16, 4, 4), (256, 16, 4, 1)) buf66 = extern_kernels.convolution(buf43, primals_61, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf66, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf67 = extern_kernels.convolution(buf39, primals_63, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf67, (4, 24, 32, 32), (24576, 1024, 32, 1)) buf68 = extern_kernels.convolution(buf47, primals_65, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf68, (4, 24, 16, 16), (6144, 256, 16, 1)) buf69 = extern_kernels.convolution(buf51, primals_67, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf69, (4, 24, 8, 8), (1536, 64, 8, 1)) buf70 = extern_kernels.convolution(buf55, primals_69, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf70, (4, 16, 6, 6), (576, 36, 6, 1)) buf71 = extern_kernels.convolution(buf59, primals_71, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf71, (4, 16, 4, 4), (256, 16, 4, 1)) buf72 = empty_strided_cuda((4, 24656, 4), (98624, 4, 1), torch.float32) triton_poi_fused_cat_21[grid(394496)](buf60, primals_50, buf61, primals_52, buf62, primals_54, buf63, primals_56, buf64, primals_58, buf65, primals_60, buf72, 394496, XBLOCK=1024, num_warps=4, num_stages=1) del buf60 del buf61 del buf62 del buf63 
del buf64 del buf65 del primals_50 del primals_52 del primals_54 del primals_56 del primals_58 del primals_60 buf73 = empty_strided_cuda((4, 24656, 4), (98624, 4, 1), torch.float32) triton_poi_fused_cat_21[grid(394496)](buf66, primals_62, buf67, primals_64, buf68, primals_66, buf69, primals_68, buf70, primals_70, buf71, primals_72, buf73, 394496, XBLOCK=1024, num_warps=4, num_stages=1) del buf66 del buf67 del buf68 del buf69 del buf70 del buf71 del primals_62 del primals_64 del primals_66 del primals_68 del primals_70 del primals_72 return (buf72, buf73, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_33, primals_35, primals_37, primals_39, primals_41, primals_43, primals_45, primals_47, primals_49, primals_51, primals_53, primals_55, primals_57, primals_59, primals_61, primals_63, primals_65, primals_67, primals_69, primals_71, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf25, buf26, buf27, buf29, buf31, buf33, buf34, buf35, buf37, buf39, buf41, buf42, buf43, buf45, buf47, buf49, buf51, buf53, buf55, buf57, buf59) def decimate(tensor, m): """ Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value. This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size. :param tensor: tensor to be decimated :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension :return: decimated tensor """ assert tensor.dim() == len(m) for d in range(tensor.dim()): if m[d] is not None: tensor = tensor.index_select(dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long()) return tensor def cxcy_to_xy(cxcy): """ Convert bounding boxes from center-size coordinates (c_x, c_y, w, h) to boundary coordinates (x_min, y_min, x_max, y_max). :param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4) :return: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4) """ return torch.cat([cxcy[:, :2] - cxcy[:, 2:] / 2, cxcy[:, :2] + cxcy[:, 2:] / 2], 1) def find_intersection(set_1, set_2): """ Find the intersection of every box combination between two sets of boxes that are in boundary coordinates. :param set_1: set 1, a tensor of dimensions (n1, 4) :param set_2: set 2, a tensor of dimensions (n2, 4) :return: intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2) """ lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2]. unsqueeze(0)) upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:]. unsqueeze(0)) intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] def find_jaccard_overlap(set_1, set_2): """ Find the Jaccard Overlap (IoU) of every box combination between two sets of boxes that are in boundary coordinates. 
:param set_1: set 1, a tensor of dimensions (n1, 4) :param set_2: set 2, a tensor of dimensions (n2, 4) :return: Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2) """ intersection = find_intersection(set_1, set_2) areas_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1]) areas_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1]) union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection return intersection / union def gcxgcy_to_cxcy(gcxgcy, priors_cxcy): """ Decode bounding box coordinates predicted by the model, since they are encoded in the form mentioned above. They are decoded into center-size coordinates. This is the inverse of the function above. :param gcxgcy: encoded bounding boxes, i.e. output of the model, a tensor of size (n_priors, 4) :param priors_cxcy: prior boxes with respect to which the encoding is defined, a tensor of size (n_priors, 4) :return: decoded bounding boxes in center-size form, a tensor of size (n_priors, 4) """ return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy [:, :2], torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1) class VGGBase(nn.Module): """ VGG base convolutions to produce lower-level feature maps. """ def __init__(self): super(VGGBase, self).__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1) self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1) self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1) self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1) self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1) self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1) self.load_pretrained_layers() def forward(self, image): """ Forward propagation. :param image: images, a tensor of dimensions (N, 3, 300, 300) :return: lower-level feature maps conv4_3 and conv7 """ out = F.relu(self.conv1_1(image)) out = F.relu(self.conv1_2(out)) out = self.pool1(out) out = F.relu(self.conv2_1(out)) out = F.relu(self.conv2_2(out)) out = self.pool2(out) out = F.relu(self.conv3_1(out)) out = F.relu(self.conv3_2(out)) out = F.relu(self.conv3_3(out)) out = self.pool3(out) out = F.relu(self.conv4_1(out)) out = F.relu(self.conv4_2(out)) out = F.relu(self.conv4_3(out)) conv4_3_feats = out out = self.pool4(out) out = F.relu(self.conv5_1(out)) out = F.relu(self.conv5_2(out)) out = F.relu(self.conv5_3(out)) out = self.pool5(out) out = F.relu(self.conv6(out)) conv7_feats = F.relu(self.conv7(out)) return conv4_3_feats, conv7_feats def load_pretrained_layers(self): """ As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network. 
There's one available in PyTorch, see https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16 We copy these parameters into our network. It's straightforward for conv1 to conv5. However, the original VGG-16 does not contain the conv6 and con7 layers. Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py. """ state_dict = self.state_dict() param_names = list(state_dict.keys()) pretrained_state_dict = torchvision.models.vgg16(pretrained=True ).state_dict() pretrained_param_names = list(pretrained_state_dict.keys()) for i, param in enumerate(param_names[:-4]): state_dict[param] = pretrained_state_dict[pretrained_param_names[i] ] conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view( 4096, 512, 7, 7) conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view( 4096, 4096, 1, 1) conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) self.load_state_dict(state_dict) None class AuxiliaryConvolutions(nn.Module): """ Additional convolutions to produce higher-level feature maps. """ def __init__(self): super(AuxiliaryConvolutions, self).__init__() self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0) self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0) self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0) self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. """ for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, conv7_feats): """ Forward propagation. :param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19) :return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2 """ out = F.relu(self.conv8_1(conv7_feats)) out = F.relu(self.conv8_2(out)) conv8_2_feats = out out = F.relu(self.conv9_1(out)) out = F.relu(self.conv9_2(out)) conv9_2_feats = out out = F.relu(self.conv10_1(out)) out = F.relu(self.conv10_2(out)) conv10_2_feats = out out = F.relu(self.conv11_1(out)) conv11_2_feats = F.relu(self.conv11_2(out)) return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats class PredictionConvolutions(nn.Module): """ Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps. The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 8732 prior (default) boxes. See 'cxcy_to_gcxgcy' in utils.py for the encoding definition. The class scores represent the scores of each object class in each of the 8732 bounding boxes located. A high score for 'background' = no object. 
""" def __init__(self, n_classes): """ :param n_classes: number of different types of objects """ super(PredictionConvolutions, self).__init__() self.n_classes = n_classes n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6, 'conv10_2': 4, 'conv11_2': 4} self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1) self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size= 3, padding=1) self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1) self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1) self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1) self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1) self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes, kernel_size=3, padding=1) self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1) self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1) self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1) self.init_conv2d() def init_conv2d(self): """ Initialize convolution parameters. """ for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.0) def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats): """ Forward propagation. :param conv4_3_feats: conv4_3 feature map, a tensor of dimensions (N, 512, 38, 38) :param conv7_feats: conv7 feature map, a tensor of dimensions (N, 1024, 19, 19) :param conv8_2_feats: conv8_2 feature map, a tensor of dimensions (N, 512, 10, 10) :param conv9_2_feats: conv9_2 feature map, a tensor of dimensions (N, 256, 5, 5) :param conv10_2_feats: conv10_2 feature map, a tensor of dimensions (N, 256, 3, 3) :param conv11_2_feats: conv11_2 feature map, a tensor of dimensions (N, 256, 1, 1) :return: 8732 locations and class scores (i.e. 
w.r.t each prior box) for each image """ batch_size = conv4_3_feats.size(0) l_conv4_3 = self.loc_conv4_3(conv4_3_feats) l_conv4_3 = l_conv4_3.permute(0, 2, 3, 1).contiguous() l_conv4_3 = l_conv4_3.view(batch_size, -1, 4) l_conv7 = self.loc_conv7(conv7_feats) l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous() l_conv7 = l_conv7.view(batch_size, -1, 4) l_conv8_2 = self.loc_conv8_2(conv8_2_feats) l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous() l_conv8_2 = l_conv8_2.view(batch_size, -1, 4) l_conv9_2 = self.loc_conv9_2(conv9_2_feats) l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous() l_conv9_2 = l_conv9_2.view(batch_size, -1, 4) l_conv10_2 = self.loc_conv10_2(conv10_2_feats) l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous() l_conv10_2 = l_conv10_2.view(batch_size, -1, 4) l_conv11_2 = self.loc_conv11_2(conv11_2_feats) l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous() l_conv11_2 = l_conv11_2.view(batch_size, -1, 4) c_conv4_3 = self.cl_conv4_3(conv4_3_feats) c_conv4_3 = c_conv4_3.permute(0, 2, 3, 1).contiguous() c_conv4_3 = c_conv4_3.view(batch_size, -1, self.n_classes) c_conv7 = self.cl_conv7(conv7_feats) c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous() c_conv7 = c_conv7.view(batch_size, -1, self.n_classes) c_conv8_2 = self.cl_conv8_2(conv8_2_feats) c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous() c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes) c_conv9_2 = self.cl_conv9_2(conv9_2_feats) c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous() c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes) c_conv10_2 = self.cl_conv10_2(conv10_2_feats) c_conv10_2 = c_conv10_2.permute(0, 2, 3, 1).contiguous() c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes) c_conv11_2 = self.cl_conv11_2(conv11_2_feats) c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous() c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes) locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2, l_conv10_2, l_conv11_2], dim=1) classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2, c_conv9_2, c_conv10_2, c_conv11_2], dim=1) return locs, classes_scores class SSD300New(nn.Module): """ The SSD300 network - encapsulates the base VGG network, auxiliary, and prediction convolutions. """ def __init__(self, n_classes): super(SSD300New, self).__init__() self.n_classes = n_classes self.base = VGGBase() self.aux_convs = AuxiliaryConvolutions() self.pred_convs = PredictionConvolutions(n_classes) self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1)) nn.init.constant_(self.rescale_factors, 20) self.priors_cxcy = self.create_prior_boxes() def create_prior_boxes(self): """ Create the 8732 prior (default) boxes for the SSD300, as defined in the paper. 
:return: prior boxes in center-size coordinates, a tensor of dimensions (8732, 4) """ fmap_dims = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10, 'conv9_2': 5, 'conv10_2': 3, 'conv11_2': 1} obj_scales = {'conv4_3': 0.1, 'conv7': 0.2, 'conv8_2': 0.375, 'conv9_2': 0.55, 'conv10_2': 0.725, 'conv11_2': 0.9} aspect_ratios = {'conv4_3': [2.0, 4.0, 0.25], 'conv7': [2.0, 4.0, 3.0, 0.25, 0.333], 'conv8_2': [2.0, 4.0, 3.0, 0.25, 0.333], 'conv9_2': [2.0, 4.0, 3.0, 0.25, 0.333], 'conv10_2': [2.0, 4.0, 0.25], 'conv11_2': [2.0, 4.0, 0.25]} fmaps = list(fmap_dims.keys()) prior_boxes = [] for k, fmap in enumerate(fmaps): for i in range(fmap_dims[fmap]): for j in range(fmap_dims[fmap]): cx = (j + 0.5) / fmap_dims[fmap] cy = (i + 0.5) / fmap_dims[fmap] for ratio in aspect_ratios[fmap]: prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt (ratio), obj_scales[fmap] / sqrt(ratio)]) if ratio == 2.0: try: additional_scale = sqrt(obj_scales[fmap] * obj_scales[fmaps[k + 1]]) except IndexError: additional_scale = 1.0 prior_boxes.append([cx, cy, additional_scale, additional_scale]) prior_boxes = torch.FloatTensor(prior_boxes) prior_boxes.clamp_(0, 1) return prior_boxes def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k): """ Decipher the 8732 locations and class scores (output of ths SSD300) to detect objects. For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold. :param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4) :param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes) :param min_score: minimum threshold for a box to be considered a match for a certain class :param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS :param top_k: if there are a lot of resulting detection across all classes, keep only the top 'k' :return: detections (boxes, labels, and scores), lists of length batch_size """ batch_size = predicted_locs.size(0) n_priors = self.priors_cxcy.size(0) predicted_scores = F.softmax(predicted_scores, dim=2) all_images_boxes = list() all_images_labels = list() all_images_scores = list() assert n_priors == predicted_locs.size(1) == predicted_scores.size(1) for i in range(batch_size): decoded_locs = cxcy_to_xy(gcxgcy_to_cxcy(predicted_locs[i], self.priors_cxcy)) image_boxes = list() image_labels = list() image_scores = list() _max_scores, _best_label = predicted_scores[i].max(dim=1) for c in range(1, self.n_classes): class_scores = predicted_scores[i][:, c] score_above_min_score = class_scores > min_score n_above_min_score = score_above_min_score.sum().item() if n_above_min_score == 0: continue class_scores = class_scores[score_above_min_score] class_decoded_locs = decoded_locs[score_above_min_score] class_scores, sort_ind = class_scores.sort(dim=0, descending=True) class_decoded_locs = class_decoded_locs[sort_ind] overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs) suppress = torch.zeros(n_above_min_score, dtype=torch.uint8) for box in range(class_decoded_locs.size(0)): if suppress[box] == 1: continue suppress = torch.max(suppress, overlap[box] > max_overlap) suppress[box] = 0 image_boxes.append(class_decoded_locs[1 - suppress]) image_labels.append(torch.LongTensor((1 - suppress).sum(). 
item() * [c])) image_scores.append(class_scores[1 - suppress]) if len(image_boxes) == 0: image_boxes.append(torch.FloatTensor([[0.0, 0.0, 1.0, 1.0]])) image_labels.append(torch.LongTensor([0])) image_scores.append(torch.FloatTensor([0.0])) image_boxes = torch.cat(image_boxes, dim=0) image_labels = torch.cat(image_labels, dim=0) image_scores = torch.cat(image_scores, dim=0) n_objects = image_scores.size(0) if n_objects > top_k: image_scores, sort_ind = image_scores.sort(dim=0, descending=True) image_scores = image_scores[:top_k] image_boxes = image_boxes[sort_ind][:top_k] image_labels = image_labels[sort_ind][:top_k] all_images_boxes.append(image_boxes) all_images_labels.append(image_labels) all_images_scores.append(image_scores) return all_images_boxes, all_images_labels, all_images_scores def forward(self, input_0): primals_32 = self.rescale_factors primals_1 = self.base.conv1_1.weight primals_2 = self.base.conv1_1.bias primals_4 = self.base.conv1_2.weight primals_5 = self.base.conv1_2.bias primals_6 = self.base.conv2_1.weight primals_7 = self.base.conv2_1.bias primals_8 = self.base.conv2_2.weight primals_9 = self.base.conv2_2.bias primals_10 = self.base.conv3_1.weight primals_11 = self.base.conv3_1.bias primals_12 = self.base.conv3_2.weight primals_13 = self.base.conv3_2.bias primals_14 = self.base.conv3_3.weight primals_15 = self.base.conv3_3.bias primals_16 = self.base.conv4_1.weight primals_17 = self.base.conv4_1.bias primals_18 = self.base.conv4_2.weight primals_19 = self.base.conv4_2.bias primals_20 = self.base.conv4_3.weight primals_21 = self.base.conv4_3.bias primals_22 = self.base.conv5_1.weight primals_23 = self.base.conv5_1.bias primals_24 = self.base.conv5_2.weight primals_25 = self.base.conv5_2.bias primals_26 = self.base.conv5_3.weight primals_27 = self.base.conv5_3.bias primals_28 = self.base.conv6.weight primals_29 = self.base.conv6.bias primals_30 = self.base.conv7.weight primals_31 = self.base.conv7.bias primals_33 = self.aux_convs.conv8_1.weight primals_34 = self.aux_convs.conv8_1.bias primals_35 = self.aux_convs.conv8_2.weight primals_36 = self.aux_convs.conv8_2.bias primals_37 = self.aux_convs.conv9_1.weight primals_38 = self.aux_convs.conv9_1.bias primals_39 = self.aux_convs.conv9_2.weight primals_40 = self.aux_convs.conv9_2.bias primals_41 = self.aux_convs.conv10_1.weight primals_42 = self.aux_convs.conv10_1.bias primals_43 = self.aux_convs.conv10_2.weight primals_44 = self.aux_convs.conv10_2.bias primals_45 = self.aux_convs.conv11_1.weight primals_46 = self.aux_convs.conv11_1.bias primals_47 = self.aux_convs.conv11_2.weight primals_48 = self.aux_convs.conv11_2.bias primals_49 = self.pred_convs.loc_conv4_3.weight primals_50 = self.pred_convs.loc_conv4_3.bias primals_51 = self.pred_convs.loc_conv7.weight primals_52 = self.pred_convs.loc_conv7.bias primals_53 = self.pred_convs.loc_conv8_2.weight primals_54 = self.pred_convs.loc_conv8_2.bias primals_55 = self.pred_convs.loc_conv9_2.weight primals_56 = self.pred_convs.loc_conv9_2.bias primals_57 = self.pred_convs.loc_conv10_2.weight primals_58 = self.pred_convs.loc_conv10_2.bias primals_59 = self.pred_convs.loc_conv11_2.weight primals_60 = self.pred_convs.loc_conv11_2.bias primals_61 = self.pred_convs.cl_conv4_3.weight primals_62 = self.pred_convs.cl_conv4_3.bias primals_63 = self.pred_convs.cl_conv7.weight primals_64 = self.pred_convs.cl_conv7.bias primals_65 = self.pred_convs.cl_conv8_2.weight primals_66 = self.pred_convs.cl_conv8_2.bias primals_67 = self.pred_convs.cl_conv9_2.weight primals_68 = 
self.pred_convs.cl_conv9_2.bias primals_69 = self.pred_convs.cl_conv10_2.weight primals_70 = self.pred_convs.cl_conv10_2.bias primals_71 = self.pred_convs.cl_conv11_2.weight primals_72 = self.pred_convs.cl_conv11_2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72]) return output[0], output[1]
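For reference, the fc6-to-conv6 conversion that load_pretrained_layers performs via decimate can be sanity-checked shape-wise in isolation. A minimal sketch (random values; only the shapes and the decimation factors [4, None, 3, 3] are taken from the code above):

import torch

# fc6 weight viewed as (4096, 512, 7, 7), decimated along dims 0, 2, 3
w = torch.randn(4096, 512, 7, 7)
for d, step in enumerate([4, None, 3, 3]):
    if step is not None:
        w = w.index_select(d, torch.arange(0, w.size(d), step))
print(w.shape)  # torch.Size([1024, 512, 3, 3]) -- the conv6 weight shape (primals_28)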
aarashfeizi/a-PyTorch-Tutorial-to-Object-Detection
SSD300
false
1907
[ "MIT" ]
0
a9e1f3092d4b8c094bff5cd0897e0e3c1e0bc9c2
https://github.com/aarashfeizi/a-PyTorch-Tutorial-to-Object-Detection/tree/a9e1f3092d4b8c094bff5cd0897e0e3c1e0bc9c2
import torch import torchvision from torch import nn import torch.nn.functional as F from math import sqrt from itertools import product as product import torch.optim import torch.utils.data def decimate(tensor, m): """ Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value. This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size. :param tensor: tensor to be decimated :param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension :return: decimated tensor """ assert tensor.dim() == len(m) for d in range(tensor.dim()): if m[d] is not None: tensor = tensor.index_select(dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long()) return tensor def cxcy_to_xy(cxcy): """ Convert bounding boxes from center-size coordinates (c_x, c_y, w, h) to boundary coordinates (x_min, y_min, x_max, y_max). :param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4) :return: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4) """ return torch.cat([cxcy[:, :2] - cxcy[:, 2:] / 2, cxcy[:, :2] + cxcy[:, 2:] / 2], 1) def find_intersection(set_1, set_2): """ Find the intersection of every box combination between two sets of boxes that are in boundary coordinates. :param set_1: set 1, a tensor of dimensions (n1, 4) :param set_2: set 2, a tensor of dimensions (n2, 4) :return: intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2) """ lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2]. unsqueeze(0)) upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:]. unsqueeze(0)) intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0) return intersection_dims[:, :, 0] * intersection_dims[:, :, 1] def find_jaccard_overlap(set_1, set_2): """ Find the Jaccard Overlap (IoU) of every box combination between two sets of boxes that are in boundary coordinates. :param set_1: set 1, a tensor of dimensions (n1, 4) :param set_2: set 2, a tensor of dimensions (n2, 4) :return: Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2) """ intersection = find_intersection(set_1, set_2) areas_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1]) areas_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1]) union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection return intersection / union def gcxgcy_to_cxcy(gcxgcy, priors_cxcy): """ Decode bounding box coordinates predicted by the model, since they are encoded in the form mentioned above. They are decoded into center-size coordinates. This is the inverse of the function above. :param gcxgcy: encoded bounding boxes, i.e. output of the model, a tensor of size (n_priors, 4) :param priors_cxcy: prior boxes with respect to which the encoding is defined, a tensor of size (n_priors, 4) :return: decoded bounding boxes in center-size form, a tensor of size (n_priors, 4) """ return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy [:, :2], torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1) class VGGBase(nn.Module): """ VGG base convolutions to produce lower-level feature maps. 
""" def __init__(self): super().__init__() self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1) self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1) self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1) self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1) self.poo # ... truncated (>4000 chars) for memory efficiency
Model
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/uu/cuupb7yo7ai64qlwi4lpnlvdf3gw6jlp543potvhemy2bbjwrt5a.py # Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # mul_1 => mul_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1073741824], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1073741824 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 16777216 tmp0 = 
tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16777216, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 16777216), (268435456, 67108864, 16777216, 1)) assert_size_stride(primals_3, (16777216, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 16777216), (268435456, 67108864, 16777216, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, primals_1, primals_3, buf0, 1073741824, grid=grid(1073741824), stream=stream0) return (buf0, primals_1, primals_2, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16777216, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 16777216), (268435456, 67108864, 16777216, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16777216, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
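The generated kernel above is a broadcasted elementwise multiply: x0 = xindex % 16777216 maps every element of the contiguous 4-D input back onto the 1-D parameters. A minimal standalone sketch of the same pattern (mul_broadcast_kernel and the small sizes are illustrative, not part of the generated code; inner plays the role of the hard-coded 16777216):

import torch
import triton
import triton.language as tl

@triton.jit
def mul_broadcast_kernel(x_ptr, a_ptr, b_ptr, out_ptr, inner, n_elements,
                         BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n_elements
    inner_offs = offs % inner  # fold the flat index onto the 1-D parameters
    x = tl.load(x_ptr + offs, mask=mask)
    a = tl.load(a_ptr + inner_offs, mask=mask)
    b = tl.load(b_ptr + inner_offs, mask=mask)
    tl.store(out_ptr + offs, x * a * b, mask=mask)

x = torch.rand(4, 4, 4, 8, device='cuda')
a = torch.full((8,), 1.0, device='cuda')
b = torch.full((8,), 2.0, device='cuda')
out = torch.empty_like(x)
n = x.numel()
mul_broadcast_kernel[(triton.cdiv(n, 256),)](x, a, b, out, 8, n, BLOCK=256)
assert torch.allclose(out, x * a * b)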
from torch.nn import Module import torch import torch.nn.functional from torch.nn.parameter import Parameter from torch.nn.modules import Module import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torch.nn import Parameter from torch.nn import Module class Model(Module): def __init__(self): super(Model, self).__init__() self.a = Parameter(torch.FloatTensor(4096 * 4096).fill_(1.0)) self.b = Parameter(torch.FloatTensor(4096 * 4096).fill_(2.0)) def forward(self, input): return input * self.a * self.b def get_inputs(): return [torch.rand([4, 4, 4, 16777216])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module import torch.nn.functional from torch.nn.parameter import Parameter from torch.nn.modules import Module import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torch.nn import Parameter from torch.nn import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 16777216 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x2, tmp4, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (16777216,), (1,)) assert_size_stride(primals_2, (4, 4, 4, 16777216), (268435456, 67108864, 16777216, 1)) assert_size_stride(primals_3, (16777216,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 16777216), (268435456, 67108864, 16777216, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(1073741824)](primals_2, primals_1, primals_3, buf0, 1073741824, XBLOCK=1024, num_warps=4, num_stages=1 ) return buf0, primals_1, primals_2, primals_3 class ModelNew(Module): def __init__(self): super(ModelNew, self).__init__() self.a = Parameter(torch.FloatTensor(4096 * 4096).fill_(1.0)) self.b = Parameter(torch.FloatTensor(4096 * 4096).fill_(2.0)) def forward(self, input_0): primals_1 = self.a primals_3 = self.b primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
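Assuming Model and ModelNew from this entry are importable together, the compiled path can be checked against eager mode. The traced shapes are hard-coded into the kernel, so the full input from get_inputs is required, which needs on the order of 16 GiB of GPU memory:

import torch

x = torch.rand([4, 4, 4, 16777216], device='cuda')
ref = Model().cuda()(x)      # eager: x * a * b
out = ModelNew().cuda()(x)   # fused Triton kernel
assert torch.allclose(ref, out)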
Jovonni/jukebox
Model
false
1908
[ "MIT" ]
0
965a6f78aae67506a6e4fcdb205e2c39132e12e0
https://github.com/Jovonni/jukebox/tree/965a6f78aae67506a6e4fcdb205e2c39132e12e0
from torch.nn import Module import torch import torch.nn.functional from torch.nn.parameter import Parameter from torch.nn.modules import Module import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torch.nn import Parameter from torch.nn import Module class Model(Module): def __init__(self): super(Model, self).__init__() self.a = Parameter(torch.FloatTensor(4096 * 4096).fill_(1.0)) self.b = Parameter(torch.FloatTensor(4096 * 4096).fill_(2.0)) def forward(self, input): return input * self.a * self.b def get_inputs(): return [torch.rand([4, 4, 4, 16777216])] def get_init_inputs(): return []
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/df/cdfcie57v6pcdd6oeaz4mvlgksxgyuxzmlv5bklwemyulqhtcxta.py # Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mean => mean # mul => mul # std => sqrt, var # sub => sub # truediv => div # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sub), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': 
set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp13 = tmp2 - tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp3 - tmp10 tmp16 = tmp15 * tmp15 tmp17 = tmp14 + tmp16 tmp18 = tmp5 - tmp10 tmp19 = tmp18 * tmp18 tmp20 = tmp17 + tmp19 tmp21 = tmp7 - tmp10 tmp22 = tmp21 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = 3.0 tmp25 = tmp23 / tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = 1e-06 tmp28 = tmp26 + tmp27 tmp29 = tmp12 / tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, mul, add, truediv, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
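The constants 4.0 and 3.0 in the kernel above are the last-dimension size and, because Tensor.std defaults to the unbiased estimator, Bessel's correction N - 1 = 3. A small eager-mode check of that arithmetic:

import torch

x = torch.rand(4, 4, 4, 4)
mean = x.sum(-1, keepdim=True) / 4.0                 # tmp10 in the kernel
var = ((x - mean) ** 2).sum(-1, keepdim=True) / 3.0  # tmp25
assert torch.allclose(var.sqrt(), x.std(-1, keepdim=True))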
import torch from torch import nn class LayerNorm(nn.Module): """ Builds the normalization layer """ def __init__(self, features, eps=1e-06): """ LayerNorm constructor Parameters: features: Size of the feature vector eps: Offset added to the std before the division """ super(LayerNorm, self).__init__() self.a_2 = nn.Parameter(torch.ones(features)) self.b_2 = nn.Parameter(torch.zeros(features)) self.eps = eps def forward(self, x): """ Forward pass of the normalization layer Parameters: x: Inputs (encoder outputs) """ mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.a_2 * (x - mean) / (std + self.eps) + self.b_2 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'features': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = 4.0
    tmp10 = tmp8 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp0 * tmp11
    tmp13 = tmp2 - tmp10
    tmp14 = tmp13 * tmp13
    tmp15 = tmp3 - tmp10
    tmp16 = tmp15 * tmp15
    tmp17 = tmp14 + tmp16
    tmp18 = tmp5 - tmp10
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp10
    tmp22 = tmp21 * tmp21
    tmp23 = tmp20 + tmp22
    tmp24 = 3.0
    tmp25 = tmp23 / tmp24
    tmp26 = libdevice.sqrt(tmp25)
    tmp27 = 1e-06
    tmp28 = tmp26 + tmp27
    tmp29 = tmp12 / tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
            primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_2
        del primals_3
    return buf0, primals_1


class LayerNormNew(nn.Module):
    """
    Builds the normalization layer
    """

    def __init__(self, features, eps=1e-06):
        """
        LayerNorm constructor

        Parameters:
          features: Size of the vector
          eps: Small offset for the division
        """
        super(LayerNormNew, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, input_0):
        primals_2 = self.a_2
        primals_3 = self.b_2
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
AbeJLazaro/TraductorEspanolOtomi
LayerNorm
false
1,909
[ "MIT" ]
0
75e1558d3b1a7efe9beb3c7d992c3bf1d3d88d0b
https://github.com/AbeJLazaro/TraductorEspanolOtomi/tree/75e1558d3b1a7efe9beb3c7d992c3bf1d3d88d0b
import torch
from torch import nn


class Model(nn.Module):
    """
    Builds the normalization layer
    """

    def __init__(self, features, eps=1e-06):
        """
        LayerNorm constructor

        Parameters:
          features: Size of the vector
          eps: Small offset for the division
        """
        super().__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        """
        Forward pass of the normalization layer

        Parameters:
          x: Inputs (outputs of the encoder)
        """
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
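A minimal sanity check, not part of the original record, that the fused kernel above reproduces the eager LayerNorm on the shapes returned by get_inputs(); it assumes a CUDA device. Note the kernel's division by 3.0 matches torch.std's default Bessel correction.

import torch

# Hypothetical check: LayerNorm and LayerNormNew are the classes from this record.
eager = LayerNorm(4).cuda()
fused = LayerNormNew(4).cuda()
fused.load_state_dict(eager.state_dict())  # both expose a_2 and b_2
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), fused(x))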
GEGLU
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/g3/cg3jxrez5zd4oyrbdxcja37kso4ls6az3z6q5iucel6jjkrpi3th.py # Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul] # Source node to ATen node mapping: # gelu => add, erf, mul, mul_1, mul_2 # mul => mul_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %getitem), kwargs = {}) triton_poi_fused_gelu_mul_0 = async_compile.triton('triton_poi_fused_gelu_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask) tmp9 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_gelu_mul_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class GEGLU(nn.Module): def forward(self, x): x, gates = x.chunk(2, dim=-1) return F.gelu(gates) * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask) tmp9 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_mul_0[grid(128)](arg0_1, buf0, 128, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GEGLUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AlansBoyHeart/vit-pytorch
GEGLU
false
1,910
[ "MIT" ]
0
1959adae0bdd7801475bba34d7d61bdc529b4616
https://github.com/AlansBoyHeart/vit-pytorch/tree/1959adae0bdd7801475bba34d7d61bdc529b4616
import torch import torch.nn.functional as F import torch.nn as nn class Model(nn.Module): def forward(self, x): x, gates = x.chunk(2, dim=-1) return F.gelu(gates) * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
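Reading the kernel above, tmp0 is loaded at offset 2 + x0 + 4 * x1 (the second half of the last dimension, i.e. the gates) and tmp9 at x0 + 4 * x1 (the first half), matching x, gates = x.chunk(2, dim=-1). A minimal equivalence sketch, assuming a CUDA device:

import torch
import torch.nn.functional as F

# Hypothetical check: GEGLUNew is the class from this record.
inp = torch.rand(4, 4, 4, 4, device='cuda')
x, gates = inp.chunk(2, dim=-1)  # halves of the last dimension
ref = F.gelu(gates) * x          # eager GEGLU (exact erf-based gelu, as in the kernel)
torch.testing.assert_close(ref, GEGLUNew()(inp))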
Generator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [-1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = 
tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/32/c32vfxouqe74ea5scuzrdhpd7r6adxwu4bzarm4icjfnb47jbizg.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0) del buf1 return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional as F


class Generator(nn.Module):
    """
    Defines the linear + softmax generation step
    """

    def __init__(self, d_model, vocab):
        """
        Linear generator constructor

        Parameters:
          d_model: Model dimension
          vocab: Vocabulary size
        """
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, x):
        """
        Forward pass of the generator

        Parameters:
          x: Inputs for the generator (the output of the decoders)

        return probabilities generated by softmax
        """
        return F.log_softmax(self.proj(x), dim=-1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'vocab': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=
            256, num_warps=4, num_stages=1)
        del buf1
    return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2


class GeneratorNew(nn.Module):
    """
    Defines the linear + softmax generation step
    """

    def __init__(self, d_model, vocab):
        """
        Linear generator constructor

        Parameters:
          d_model: Model dimension
          vocab: Vocabulary size
        """
        super(GeneratorNew, self).__init__()
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, input_0):
        primals_1 = self.proj.weight
        primals_2 = self.proj.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
AbeJLazaro/TraductorEspanolOtomi
Generator
false
1,911
[ "MIT" ]
0
75e1558d3b1a7efe9beb3c7d992c3bf1d3d88d0b
https://github.com/AbeJLazaro/TraductorEspanolOtomi/tree/75e1558d3b1a7efe9beb3c7d992c3bf1d3d88d0b
import torch
from torch import nn
import torch.nn.functional as F


class Model(nn.Module):
    """
    Defines the linear + softmax generation step
    """

    def __init__(self, d_model, vocab):
        """
        Linear generator constructor

        Parameters:
          d_model: Model dimension
          vocab: Vocabulary size
        """
        super().__init__()
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, x):
        """
        Forward pass of the generator

        Parameters:
          x: Inputs for the generator (the output of the decoders)

        return probabilities generated by softmax
        """
        return F.log_softmax(self.proj(x), dim=-1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
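A minimal sanity check, assuming a CUDA device, that the two-kernel log-softmax above (max-subtraction pass, then exp/sum/log pass) matches the eager Generator once the projection weights are shared:

import torch

# Hypothetical check: Generator and GeneratorNew are the classes from this record.
eager = Generator(d_model=4, vocab=4).cuda()
fused = GeneratorNew(d_model=4, vocab=4).cuda()
fused.load_state_dict(eager.state_dict())  # shares proj.weight / proj.bias
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), fused(x))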
BPRLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/uy/cuyeld5sulrg2l2zfz6tdc3pf2qufuofz6b5m6vsk3dr6ka2ymlm.py # Topologically Sorted Source Nodes: [sub, sigmoid, add, log, mean, loss], Original ATen: [aten.sub, aten.sigmoid, aten.add, aten.log, aten.mean, aten.neg] # Source node to ATen node mapping: # add => add # log => log # loss => neg # mean => mean # sigmoid => sigmoid # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%sub,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, 1e-10), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%log,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) triton_per_fused_add_log_mean_neg_sigmoid_sub_0 = async_compile.triton('triton_per_fused_add_log_mean_neg_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_neg_sigmoid_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_log_mean_neg_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = 1e-10 tmp5 = tmp3 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = -tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, sigmoid, add, log, mean, loss], Original ATen: [aten.sub, aten.sigmoid, aten.add, aten.log, aten.mean, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_add_log_mean_neg_sigmoid_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class BPRLoss(nn.Module): """ BPRLoss, based on Bayesian Personalized Ranking Args: - gamma(float): Small value to avoid division by zero Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. Examples:: >>> loss = BPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self, gamma=1e-10): super(BPRLoss, self).__init__() self.gamma = gamma def forward(self, pos_score, neg_score): loss = -torch.log(self.gamma + torch.sigmoid(pos_score - neg_score) ).mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_neg_sigmoid_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = 1e-10 tmp5 = tmp3 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = 256.0 tmp11 = tmp9 / tmp10 tmp12 = -tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_log_mean_neg_sigmoid_sub_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BPRLossNew(nn.Module): """ BPRLoss, based on Bayesian Personalized Ranking Args: - gamma(float): Small value to avoid division by zero Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. Examples:: >>> loss = BPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self, gamma=1e-10): super(BPRLossNew, self).__init__() self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Ahren09/RecBole
BPRLoss
false
1,912
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch import torch.nn as nn class Model(nn.Module): """ BPRLoss, based on Bayesian Personalized Ranking Args: - gamma(float): Small value to avoid division by zero Shape: - Pos_score: (N) - Neg_score: (N), same shape as the Pos_score - Output: scalar. Examples:: >>> loss = BPRLoss() >>> pos_score = torch.randn(3, requires_grad=True) >>> neg_score = torch.randn(3, requires_grad=True) >>> output = loss(pos_score, neg_score) >>> output.backward() """ def __init__(self, gamma=1e-10): super().__init__() self.gamma = gamma def forward(self, pos_score, neg_score): loss = -torch.log(self.gamma + torch.sigmoid(pos_score - neg_score) ).mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
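A minimal sanity check, assuming a CUDA device, that the persistent-reduction kernel reproduces the eager scalar loss on the shapes from get_inputs():

import torch

# Hypothetical check: BPRLoss and BPRLossNew are the classes from this record.
pos = torch.rand(4, 4, 4, 4, device='cuda')
neg = torch.rand(4, 4, 4, 4, device='cuda')
ref = BPRLoss()(pos, neg)  # eager: -log(gamma + sigmoid(pos - neg)).mean()
torch.testing.assert_close(ref, BPRLossNew()(pos, neg))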
PixelNormLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ot/cotu2ivn5oifbezvztnybj4hxdzlduw4unots44b65tvh2c2a6wn.py # Topologically Sorted Source Nodes: [pow_1, mean, add, sqrt, truediv], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add => add # mean => mean # pow_1 => pow_1 # sqrt => sqrt # truediv => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %sqrt), kwargs = {}) triton_poi_fused_add_div_mean_pow_sqrt_0 = async_compile.triton('triton_poi_fused_add_div_mean_pow_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_pow_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_pow_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-08 tmp15 = tmp13 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = tmp0 / tmp16 tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, mean, add, sqrt, truediv], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_pow_sqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class PixelNormLayer(nn.Module): def __init__(self): super(PixelNormLayer, self).__init__() def forward(self, x): return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) + 1e-08) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_pow_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-08 tmp15 = tmp13 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = tmp0 / tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_pow_sqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class PixelNormLayerNew(nn.Module): def __init__(self): super(PixelNormLayerNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AleksiKnuutila/ganspace
PixelNormLayer
false
1,913
[ "Apache-2.0" ]
0
23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
https://github.com/AleksiKnuutila/ganspace/tree/23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) + 1e-08) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
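A minimal sanity check, assuming a CUDA device; the four evict_last loads in the kernel step through dim=1 (stride 16), so the mean of squares is taken over channels exactly as in the eager module:

import torch

# Hypothetical check: PixelNormLayer and PixelNormLayerNew are from this record.
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(PixelNormLayer()(x), PixelNormLayerNew()(x))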
L2Norm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/jl/cjlxnxxaiarviom2nt4mvz43fs6igit2tnfokz3h6ianosaulkoi.py # Topologically Sorted Source Nodes: [norm, norm_1, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div] # Source node to ATen node mapping: # norm => pow_1, pow_2, sum_1 # norm_1 => clamp_min # truediv => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %clamp_min), kwargs = {}) triton_poi_fused_clamp_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, norm_1, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class L2Norm(nn.Module): def forward(self, x, eps=1e-06): norm = x.norm(dim=1, keepdim=True).clamp(min=eps) return x / norm def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class L2NormNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AlansBoyHeart/vit-pytorch
L2Norm
false
1,914
[ "MIT" ]
0
1959adae0bdd7801475bba34d7d61bdc529b4616
https://github.com/AlansBoyHeart/vit-pytorch/tree/1959adae0bdd7801475bba34d7d61bdc529b4616
import torch import torch.nn as nn class Model(nn.Module): def forward(self, x, eps=1e-06): norm = x.norm(dim=1, keepdim=True).clamp(min=eps) return x / norm def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
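A minimal sanity check, assuming a CUDA device; clamp(min=eps) in the eager module corresponds to triton_helpers.maximum(..., 1e-06) in the kernel:

import torch

# Hypothetical check: L2Norm and L2NormNew are the classes from this record.
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(L2Norm()(x), L2NormNew()(x))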
PositionwiseFeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional as F


class PositionwiseFeedForward(nn.Module):
    """Implements the feed-forward network equation for the transformer.

    A simple two-layer network with a ReLU in between:

        FFN(x) = max(0, xW_1 + b_1)W_2 + b_2
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        """Constructor of the FFN block.

        Parameters
        d_model: model dimension
        d_ff: feed-forward dimension
        dropout: dropout probability
        """
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Forward pass.

        Parameters:
        x: inputs
        """
        return self.w_2(self.dropout(F.relu(self.w_1(x))))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'd_ff': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
            4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3


class PositionwiseFeedForwardNew(nn.Module):
    """Implements the feed-forward network equation for the transformer.

    A simple two-layer network with a ReLU in between:

        FFN(x) = max(0, xW_1 + b_1)W_2 + b_2
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        """Constructor of the FFN block.

        Parameters
        d_model: model dimension
        d_ff: feed-forward dimension
        dropout: dropout probability
        """
        super(PositionwiseFeedForwardNew, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input_0):
        primals_1 = self.w_1.weight
        primals_2 = self.w_1.bias
        primals_4 = self.w_2.weight
        primals_5 = self.w_2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
AbeJLazaro/TraductorEspanolOtomi
PositionwiseFeedForward
false
1915
[ "MIT" ]
0
75e1558d3b1a7efe9beb3c7d992c3bf1d3d88d0b
https://github.com/AbeJLazaro/TraductorEspanolOtomi/tree/75e1558d3b1a7efe9beb3c7d992c3bf1d3d88d0b
import torch
from torch import nn
import torch.nn.functional as F


class Model(nn.Module):
    """Implements the feed-forward network equation for the transformer.

    A simple two-layer network with a ReLU in between:

        FFN(x) = max(0, xW_1 + b_1)W_2 + b_2
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        """Constructor of the FFN block.

        Parameters
        d_model: model dimension
        d_ff: feed-forward dimension
        dropout: dropout probability
        """
        super().__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Forward pass.

        Parameters:
        x: inputs
        """
        return self.w_2(self.dropout(F.relu(self.w_1(x))))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
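A minimal usage sketch (an illustration added here, not part of the dataset row; the shapes and d_model=4, d_ff=4 come from the helpers above). nn.Linear operates on the last dimension, so the FFN transforms each position independently and preserves the input shape; with dropout out of the picture (it is the identity in eval mode) the block is exactly w_2(relu(w_1(x))):

import torch
import torch.nn.functional as F
from torch import nn

torch.manual_seed(0)
w_1 = nn.Linear(4, 4)  # d_model -> d_ff
w_2 = nn.Linear(4, 4)  # d_ff -> d_model

x = torch.rand(4, 4, 4, 4)     # same shape as get_inputs() above
out = w_2(F.relu(w_1(x)))      # dropout omitted

assert out.shape == x.shape    # the FFN is shape-preserving
# Each position is transformed independently of all others:
assert torch.allclose(out[0, 0], w_2(F.relu(w_1(x[0, 0]))))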
DoubleResolutionLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/oj/cojl5mb3pzv5jbmfzjkbac5hekbmpvb72kof6ouyyasitrogdd6n.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %unsqueeze, %convert_element_type_3]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 
tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class DoubleResolutionLayer(nn.Module): def forward(self, x): x = nn.functional.interpolate(x, scale_factor=2, mode='nearest') return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class DoubleResolutionLayerNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AleksiKnuutila/ganspace
DoubleResolutionLayer
false
1916
[ "Apache-2.0" ]
0
23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
https://github.com/AleksiKnuutila/ganspace/tree/23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
import torch import torch.nn as nn class Model(nn.Module): def forward(self, x): x = nn.functional.interpolate(x, scale_factor=2, mode='nearest') return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
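A small sketch (illustrative, not part of the dataset row) of what the fused kernel above computes: nearest-neighbor interpolation with scale_factor=2 maps output pixel (h, w) back to input pixel (int(h * 0.5), int(w * 0.5)), which is exactly the tmp3/tmp4 index arithmetic in triton_poi_fused__unsafe_index_0:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
y = F.interpolate(x, scale_factor=2, mode='nearest')
assert y.shape == (4, 4, 8, 8)

# Reproduce the kernel's gather on the CPU: out[h, w] = in[int(h * 0.5), int(w * 0.5)]
h = (torch.arange(8, dtype=torch.float32) * 0.5).long()
w = (torch.arange(8, dtype=torch.float32) * 0.5).long()
assert torch.equal(y, x[:, :, h][:, :, :, w])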
NoiseLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/hy/chy2gdvcdlpxio6r2ezu76sbqj6jaum4snkq6izi5zxkdqh3u2yj.py # Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # x => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %randn), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 x0 = xindex % 16 x2 = (xindex // 
64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [noise], Original ATen: [aten.randn] buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(primals_1, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class NoiseLayer(nn.Module): """adds noise. noise is per pixel (constant over channels) with per-channel weight""" def __init__(self, channels): super().__init__() self.weight = nn.Parameter(torch.zeros(channels)) self.noise = None def forward(self, x, noise=None): if noise is None and self.noise is None: noise = torch.randn(x.size(0), 1, x.size(2), x.size(3), device= x.device, dtype=x.dtype) elif noise is None: noise = self.noise x = x + self.weight.view(1, -1, 1, 1) * noise return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 * tmp2 tmp4 = tmp0 + tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.randn.default([4, 1, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf2, buf1 class NoiseLayerNew(nn.Module): """adds noise. noise is per pixel (constant over channels) with per-channel weight""" def __init__(self, channels): super().__init__() self.weight = nn.Parameter(torch.zeros(channels)) self.noise = None def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
AleksiKnuutila/ganspace
NoiseLayer
false
1917
[ "Apache-2.0" ]
0
23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
https://github.com/AleksiKnuutila/ganspace/tree/23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
import torch import torch.nn as nn class Model(nn.Module): """adds noise. noise is per pixel (constant over channels) with per-channel weight""" def __init__(self, channels): super().__init__() self.weight = nn.Parameter(torch.zeros(channels)) self.noise = None def forward(self, x, noise=None): if noise is None and self.noise is None: noise = torch.randn(x.size(0), 1, x.size(2), x.size(3), device= x.device, dtype=x.dtype) elif noise is None: noise = self.noise x = x + self.weight.view(1, -1, 1, 1) * noise return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
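An illustrative sketch (not part of the dataset row) of the broadcast that triton_poi_fused_add_mul_0 implements: the noise carries one value per pixel shared across channels, the learned weight carries one value per channel, and because the weight is zero-initialized the layer starts out as the identity:

import torch
from torch import nn

channels = 4
weight = nn.Parameter(torch.zeros(channels))  # per-channel scale, zero at init
x = torch.rand(4, channels, 4, 4)
noise = torch.randn(4, 1, 4, 4)               # per-pixel noise, constant over channels

out = x + weight.view(1, -1, 1, 1) * noise    # (1,C,1,1) * (N,1,H,W) broadcasts to (N,C,H,W)
assert torch.equal(out, x)                    # zero weights => identity at initialization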
SEModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean] # Source node to ATen node mapping: # x => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + 
(r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ad/cadccuyhl7stcp3nyqfgohiwbiv5ckfzxsye27ithwsill6dvmh4.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_1 => convolution # x_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_3 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics 
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/lp/clprvnh5p6cmadxtwzizwydrpjlwxohxixbw4ntucp6srbu6gtis.py # Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # x_4 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, ), (1, )) assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 4, grid=grid(4), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_3.run(primals_1, buf5, buf6, 256, grid=grid(256), stream=stream0) return (buf6, primals_1, primals_2, primals_4, buf1, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch import optim as optim class SEModule(nn.Module): def __init__(self, channels, channel_reduction): super(SEModule, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc1 = nn.Conv2d(channels, channels // channel_reduction, kernel_size=1, padding=0) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // channel_reduction, channels, kernel_size=1, padding=0) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = self.avg_pool(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'channel_reduction': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 1, 1), (1, 1, 1, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(4)](buf3, primals_3, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), 
padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(16)](buf5, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf6, primals_1, primals_2, primals_4, buf1, buf3, buf5 class SEModuleNew(nn.Module): def __init__(self, channels, channel_reduction): super(SEModuleNew, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc1 = nn.Conv2d(channels, channels // channel_reduction, kernel_size=1, padding=0) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // channel_reduction, channels, kernel_size=1, padding=0) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
AdityaSidharta/kaggle_humpback_new_whale
SEModule
false
1918
[ "MIT" ]
0
779d60746f8eba99d0336836200150fa7a08388e
https://github.com/AdityaSidharta/kaggle_humpback_new_whale/tree/779d60746f8eba99d0336836200150fa7a08388e
import torch import torch.nn as nn from torch import optim as optim class Model(nn.Module): def __init__(self, channels, channel_reduction): super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc1 = nn.Conv2d(channels, channels // channel_reduction, kernel_size=1, padding=0) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // channel_reduction, channels, kernel_size=1, padding=0) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = self.avg_pool(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
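For reference, a compact eager-mode sketch (illustrative, not part of the dataset row) of the squeeze-and-excitation gate that the four kernels above fuse: a global average pool (the mean kernel), a 1x1-conv bottleneck with ReLU, a second 1x1 conv, then a sigmoid gate broadcast back over the feature map (the mul_sigmoid kernel):

import torch
from torch import nn

x = torch.rand(4, 4, 4, 4)
fc1 = nn.Conv2d(4, 1, kernel_size=1)  # channels // channel_reduction = 4 // 4 = 1
fc2 = nn.Conv2d(1, 4, kernel_size=1)

s = x.mean(dim=(-2, -1), keepdim=True)      # squeeze: (N, C, 1, 1)
g = torch.sigmoid(fc2(torch.relu(fc1(s))))  # excitation: per-channel gate in (0, 1)
out = x * g                                 # channel-wise rescale of the input
assert out.shape == x.shape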
OuterProductLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/p6/cp66pqi5yaj5elbfua3qtyhcywwcmpiswdj7u3jzry2i62aro25j.py # Topologically Sorted Source Nodes: [p_1, p_2], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # p_1 => mul # p_2 => sum_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x2 = (xindex // 24) x3 = xindex % 24 x4 = xindex tmp18 = 
tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last') tmp0 = x0 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + ((4*tmp16) + (16*x2)), xmask, eviction_policy='evict_last') tmp19 = tmp17 * tmp18 tmp20 = tl.load(in_ptr0 + (1 + (4*tmp16) + (16*x2)), xmask, eviction_policy='evict_last') tmp22 = tmp20 * tmp21 tmp23 = tmp19 + tmp22 tmp24 = tl.load(in_ptr0 + (2 + (4*tmp16) + (16*x2)), xmask, eviction_policy='evict_last') tmp26 = tmp24 * tmp25 tmp27 = tmp23 + tmp26 tmp28 = tl.load(in_ptr0 + (3 + (4*tmp16) + (16*x2)), xmask, eviction_policy='evict_last') tmp30 = tmp28 * tmp29 tmp31 = tmp27 + tmp30 tl.store(out_ptr0 + (x4), tmp31, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/6a/c6asanugzoupldkrdkisv2ufakttxezcrdo7m3cqduqfcbra4cbj.py # Topologically Sorted Source Nodes: [q, outer_product, sum_2], Original ATen: [aten.index, aten.mul, aten.sum] # Source node to ATen node mapping: # outer_product => mul_1 # q => index_1 # sum_2 => sum_2 # Graph fragment: # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_1, [None, %lift_fresh_copy_1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %index_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {}) triton_poi_fused_index_mul_sum_1 = async_compile.triton('triton_poi_fused_index_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_index_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (24*x1)), xmask) tmp19 = tl.load(in_ptr0 + (6 + x0 + (24*x1)), xmask) tmp23 = tl.load(in_ptr0 + (12 + x0 + (24*x1)), xmask) tmp27 = tl.load(in_ptr0 + (18 + x0 + (24*x1)), xmask) tmp1 = x0 tmp2 = tl.full([1], 3, tl.int64) tmp3 = tmp1 < tmp2 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.full([1], 2, tl.int64) tmp7 = tmp1 < tmp6 tmp8 = tl.where(tmp7, tmp6, tmp2) tmp9 = tl.where(tmp5, tmp4, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp1 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp1 < tmp12 tmp14 = tl.where(tmp13, tmp2, tmp2) tmp15 = tl.where(tmp11, tmp6, tmp14) tmp16 = tl.where(tmp3, tmp9, tmp15) tmp17 = tl.load(in_ptr1 + ((4*tmp16) + (16*x1)), xmask, eviction_policy='evict_last') tmp18 = tmp0 * tmp17 tmp20 = tl.load(in_ptr1 + (1 + (4*tmp16) + (16*x1)), xmask, eviction_policy='evict_last') tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp24 = tl.load(in_ptr1 + (2 + (4*tmp16) + (16*x1)), xmask, eviction_policy='evict_last') tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp28 = tl.load(in_ptr1 + (3 + (4*tmp16) + (16*x1)), xmask, eviction_policy='evict_last') tmp29 = tmp27 * tmp28 tmp30 = tmp26 + tmp29 tl.store(out_ptr0 + (x2), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 6, 4), (24, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [p_1, p_2], Original ATen: [aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_mul_sum_0.run(primals_1, primals_2, buf0, 96, grid=grid(96), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, 6), (6, 1), torch.float32) # Topologically Sorted Source Nodes: [q, outer_product, sum_2], Original ATen: [aten.index, aten.mul, aten.sum] triton_poi_fused_index_mul_sum_1.run(buf0, primals_1, buf1, 24, grid=grid(24), stream=stream0) del buf0 return (buf1, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 6, 4), (24, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class OuterProductLayer(nn.Module):
    """OuterProduct Layer used in PNN. This implementation is adapted from code
    that the author of the paper published on https://github.com/Atomu2014/product-nets.
    """

    def __init__(self, num_feature_field, embedding_size, device):
        """
        Args:
            num_feature_field(int) : number of feature fields.
            embedding_size(int) : the embedding size.
            device(torch.device) : device object of the model.
        """
        super(OuterProductLayer, self).__init__()
        self.num_feature_field = num_feature_field
        num_pairs = int(num_feature_field * (num_feature_field - 1) / 2)
        embed_size = embedding_size
        self.kernel = nn.Parameter(torch.rand(embed_size, num_pairs,
            embed_size), requires_grad=True)
        nn.init.xavier_uniform_(self.kernel)

    def forward(self, feat_emb):
        """
        Args:
            feat_emb(torch.FloatTensor) : 3D tensor with shape: [batch_size, num_feature_field, embedding_size].

        Returns:
            outer_product(torch.FloatTensor): the outer product of the input tensor, with shape [batch_size, num_pairs].
        """
        row = []
        col = []
        for i in range(self.num_feature_field - 1):
            for j in range(i + 1, self.num_feature_field):
                row.append(i)
                col.append(j)
        p = feat_emb[:, row]
        q = feat_emb[:, col]
        p.unsqueeze_(dim=1)
        p = torch.mul(p, self.kernel.unsqueeze(0))
        p = torch.sum(p, dim=-1)
        p = torch.transpose(p, 2, 1)
        outer_product = p * q
        return outer_product.sum(dim=-1)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'num_feature_field': 4, 'embedding_size': 4, 'device': 0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x2 = xindex // 24 x3 = xindex % 24 x4 = xindex tmp18 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last' ) tmp0 = x0 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + (4 * tmp16 + 16 * x2), xmask, eviction_policy ='evict_last') tmp19 = tmp17 * tmp18 tmp20 = tl.load(in_ptr0 + (1 + 4 * tmp16 + 16 * x2), xmask, eviction_policy='evict_last') tmp22 = tmp20 * tmp21 tmp23 = tmp19 + tmp22 tmp24 = tl.load(in_ptr0 + (2 + 4 * tmp16 + 16 * x2), xmask, eviction_policy='evict_last') tmp26 = tmp24 * tmp25 tmp27 = tmp23 + tmp26 tmp28 = tl.load(in_ptr0 + (3 + 4 * tmp16 + 16 * x2), xmask, eviction_policy='evict_last') tmp30 = tmp28 * tmp29 tmp31 = tmp27 + tmp30 tl.store(out_ptr0 + x4, tmp31, xmask) @triton.jit def triton_poi_fused_index_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 24 * x1), xmask) tmp19 = tl.load(in_ptr0 + (6 + x0 + 24 * x1), xmask) tmp23 = tl.load(in_ptr0 + (12 + x0 + 24 * x1), xmask) tmp27 = tl.load(in_ptr0 + (18 + x0 + 24 * x1), xmask) tmp1 = x0 tmp2 = tl.full([1], 3, tl.int64) tmp3 = tmp1 < tmp2 tmp4 = tl.full([1], 1, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.full([1], 2, tl.int64) tmp7 = tmp1 < tmp6 tmp8 = tl.where(tmp7, tmp6, tmp2) tmp9 = tl.where(tmp5, tmp4, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp1 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp1 < tmp12 tmp14 = tl.where(tmp13, tmp2, tmp2) tmp15 = tl.where(tmp11, tmp6, tmp14) tmp16 = tl.where(tmp3, tmp9, tmp15) tmp17 = tl.load(in_ptr1 + (4 * tmp16 + 16 * x1), xmask, eviction_policy ='evict_last') tmp18 = tmp0 * tmp17 tmp20 = tl.load(in_ptr1 + (1 + 4 * tmp16 + 16 * x1), xmask, eviction_policy='evict_last') tmp21 = tmp19 * tmp20 tmp22 = tmp18 + tmp21 tmp24 = tl.load(in_ptr1 + (2 + 4 * tmp16 + 16 * x1), xmask, eviction_policy='evict_last') tmp25 = tmp23 * tmp24 tmp26 = tmp22 + tmp25 tmp28 = tl.load(in_ptr1 + (3 + 4 * tmp16 + 16 * x1), xmask, eviction_policy='evict_last') tmp29 = tmp27 * tmp28 tmp30 = tmp26 + tmp29 tl.store(out_ptr0 + x2, tmp30, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) 
    assert_size_stride(primals_2, (4, 6, 4), (24, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_sum_0[grid(96)](primals_1, primals_2, buf0,
            96, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
        triton_poi_fused_index_mul_sum_1[grid(24)](buf0, primals_1, buf1,
            24, XBLOCK=32, num_warps=1, num_stages=1)
        del buf0
    return buf1, primals_1


class OuterProductLayerNew(nn.Module):
    """OuterProduct Layer used in PNN. This implementation is adapted from code
    that the author of the paper published on https://github.com/Atomu2014/product-nets.
    """

    def __init__(self, num_feature_field, embedding_size, device):
        """
        Args:
            num_feature_field(int) : number of feature fields.
            embedding_size(int) : the embedding size.
            device(torch.device) : device object of the model.
        """
        super(OuterProductLayerNew, self).__init__()
        self.num_feature_field = num_feature_field
        num_pairs = int(num_feature_field * (num_feature_field - 1) / 2)
        embed_size = embedding_size
        self.kernel = nn.Parameter(torch.rand(embed_size, num_pairs,
            embed_size), requires_grad=True)
        nn.init.xavier_uniform_(self.kernel)

    def forward(self, input_0):
        primals_2 = self.kernel
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
Ahren09/RecBole
OuterProductLayer
false
1919
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch
import torch.nn as nn


class Model(nn.Module):
    """OuterProduct Layer used in PNN. This implementation is
    adapted from code that the author of the paper published on
    https://github.com/Atomu2014/product-nets.
    """

    def __init__(self, num_feature_field, embedding_size, device):
        """
        Args:
            num_feature_field(int) :number of feature fields.
            embedding_size(int) :size of the embedding vectors.
            device(torch.device) : device object of the model.
        """
        super().__init__()
        self.num_feature_field = num_feature_field
        num_pairs = int(num_feature_field * (num_feature_field - 1) / 2)
        embed_size = embedding_size
        self.kernel = nn.Parameter(torch.rand(embed_size, num_pairs,
            embed_size), requires_grad=True)
        nn.init.xavier_uniform_(self.kernel)

    def forward(self, feat_emb):
        """
        Args:
            feat_emb(torch.FloatTensor) :3D tensor with shape: [batch_size,num_pairs,embedding_size].

        Returns:
            outer_product(torch.FloatTensor): The outer product of input tensor. shape of [batch_size, num_pairs]
        """
        row = []
        col = []
        for i in range(self.num_feature_field - 1):
            for j in range(i + 1, self.num_feature_field):
                row.append(i)
                col.append(j)
        p = feat_emb[:, row]
        q = feat_emb[:, col]
        p.unsqueeze_(dim=1)
        p = torch.mul(p, self.kernel.unsqueeze(0))
        p = torch.sum(p, dim=-1)
        p = torch.transpose(p, 2, 1)
        outer_product = p * q
        return outer_product.sum(dim=-1)


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [4, 4, 0]
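A minimal smoke-test sketch for this entry (an assumption, not part of the dataset row; it presumes the eager Model and the Triton-backed OuterProductLayerNew above are importable together and that a CUDA device is available). The row/col loops enumerate the C(4, 2) = 6 feature pairs, so both paths should agree elementwise once they share the same kernel parameter:

import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = Model(4, 4, torch.device('cuda')).cuda()
    fused = OuterProductLayerNew(4, 4, torch.device('cuda')).cuda()
    fused.kernel.data.copy_(eager.kernel.data)  # share one weight tensor
    x = torch.rand(4, 4, 4, device='cuda')      # (batch, fields, embed)
    assert eager(x).shape == (4, 6)             # 6 = 4 * 3 / 2 pairs
    assert torch.allclose(eager(x), fused(x), atol=1e-05)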
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ld/cldjop57q475tbpoqcv576vjehue6t4nclhegc4tag6qhroxiw2u.py # Topologically Sorted Source Nodes: [var, std, mean, sub, add, truediv, mul, add_1], Original ATen: [aten.var, aten.sqrt, aten.mean, aten.sub, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mean => mean # mul => mul # std => sqrt # sub => sub # truediv => div # var => var # Graph fragment: # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-05), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp21 / tmp8 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = tmp10 / tmp25 tmp28 = tmp26 * tmp27 tmp30 = tmp28 + tmp29 tl.store(out_ptr0 + (x3), tmp30, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [var, std, mean, sub, add, truediv, mul, add_1], Original ATen: [aten.var, aten.sqrt, aten.mean, aten.sub, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class LayerNorm(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): std = torch.var(x, dim=1, unbiased=False, keepdim=True).sqrt() mean = torch.mean(x, dim=1, keepdim=True) return (x - mean) / (std + self.eps) * self.g + self.b def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp21 / tmp8 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = tmp10 / tmp25 tmp28 = tmp26 * tmp27 tmp30 = tmp28 + tmp29 tl.store(out_ptr0 + x3, tmp30, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class LayerNormNew(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, input_0): primals_2 = self.g primals_3 = self.b primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
AlansBoyHeart/vit-pytorch
LayerNorm
false
1920
[ "MIT" ]
0
1959adae0bdd7801475bba34d7d61bdc529b4616
https://github.com/AlansBoyHeart/vit-pytorch/tree/1959adae0bdd7801475bba34d7d61bdc529b4616
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): std = torch.var(x, dim=1, unbiased=False, keepdim=True).sqrt() mean = torch.mean(x, dim=1, keepdim=True) return (x - mean) / (std + self.eps) * self.g + self.b def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
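A quick sanity sketch (an assumption, not part of the dataset row): the fused kernel should reproduce the eager channel-wise normalization. Note that eps is added to the standard deviation here, not to the variance as in torch.nn.LayerNorm, so the two modules are not interchangeable:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = Model(dim=4).cuda()        # eager reference from pytorch_code
    fused = LayerNormNew(dim=4).cuda()  # Triton-backed variant above
    assert torch.allclose(eager(x), fused(x), atol=1e-05)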
Upscale2d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/5a/c5a7xyzg52tfjoddmloxit4l27nteeg5esqmrvjfhym4rycl4xuk.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 2) % 4 x3 = (xindex // 16) x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + (4*x3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp0, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def upscale2d(x, factor=2, gain=1): assert x.dim() == 4 if gain != 1: x = x * gain if factor != 1: shape = x.shape x = x.view(shape[0], shape[1], shape[2], 1, shape[3], 1).expand(-1, -1, -1, factor, -1, factor) x = x.contiguous().view(shape[0], shape[1], factor * shape[2], factor * shape[3]) return x class Upscale2d(nn.Module): def __init__(self, factor=2, gain=1): super().__init__() assert isinstance(factor, int) and factor >= 1 self.gain = gain self.factor = factor def forward(self, x): return upscale2d(x, factor=self.factor, gain=self.gain) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 4 x3 = xindex // 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + x4, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1 ), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0), def upscale2d(x, factor=2, gain=1): assert x.dim() == 4 if gain != 1: x = x * gain if factor != 1: shape = x.shape x = x.view(shape[0], shape[1], shape[2], 1, shape[3], 1).expand(-1, -1, -1, factor, -1, factor) x = x.contiguous().view(shape[0], shape[1], factor * shape[2], factor * shape[3]) return x class Upscale2dNew(nn.Module): def __init__(self, factor=2, gain=1): super().__init__() assert isinstance(factor, int) and factor >= 1 self.gain = gain self.factor = factor def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
AleksiKnuutila/ganspace
Upscale2d
false
1921
[ "Apache-2.0" ]
0
23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
https://github.com/AleksiKnuutila/ganspace/tree/23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
import torch import torch.nn as nn def upscale2d(x, factor=2, gain=1): assert x.dim() == 4 if gain != 1: x = x * gain if factor != 1: shape = x.shape x = x.view(shape[0], shape[1], shape[2], 1, shape[3], 1).expand(-1, -1, -1, factor, -1, factor) x = x.contiguous().view(shape[0], shape[1], factor * shape[2], factor * shape[3]) return x class Model(nn.Module): def __init__(self, factor=2, gain=1): super().__init__() assert isinstance(factor, int) and factor >= 1 self.gain = gain self.factor = factor def forward(self, x): return upscale2d(x, factor=self.factor, gain=self.gain) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
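Because the view/expand trick simply repeats every pixel factor times along both spatial axes, the result is exactly nearest-neighbor upsampling; a small sketch (assuming the upscale2d helper above is in scope):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
y = upscale2d(x, factor=2)
assert y.shape == (4, 4, 8, 8)
# nearest-neighbor duplication is bit-exact, so torch.equal holds
assert torch.equal(y, F.interpolate(x, scale_factor=2, mode='nearest'))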
NegSamplingLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/vz/cvzebhzwnxtfxcmfn6cjxhquzwxdt74jkr6hqhnry63fhgk7rgi5.py # Topologically Sorted Source Nodes: [mul, sigmoid, mean, neg], Original ATen: [aten.mul, aten.sigmoid, aten.mean, aten.neg] # Source node to ATen node mapping: # mean => mean # mul => mul # neg => neg # sigmoid => sigmoid # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sigmoid,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) triton_per_fused_mean_mul_neg_sigmoid_0 = async_compile.triton('triton_per_fused_mean_mul_neg_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_neg_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_mul_neg_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 * tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = -tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mul, sigmoid, mean, neg], Original ATen: [aten.mul, aten.sigmoid, aten.mean, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_mean_mul_neg_sigmoid_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class NegSamplingLoss(nn.Module): def __init__(self): super(NegSamplingLoss, self).__init__() def forward(self, score, sign): return -torch.mean(torch.sigmoid(sign * score)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mean_mul_neg_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = -tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_mul_neg_sigmoid_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class NegSamplingLossNew(nn.Module): def __init__(self): super(NegSamplingLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Ahren09/RecBole
NegSamplingLoss
false
1922
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch import torch.nn as nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, score, sign): return -torch.mean(torch.sigmoid(sign * score)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
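A minimal usage sketch (an assumption; Model is the eager reference above): since the loss is -mean(sigmoid(sign * score)), positive pairs (sign = +1) with high scores drive it toward its minimum of -1, while negative pairs (sign = -1) drive it toward 0:

import torch

loss_fn = Model()
score = torch.full((8,), 10.0)        # confidently high scores
pos = loss_fn(score, torch.ones(8))   # sigmoid(+10) ~ 1, loss ~ -1
neg = loss_fn(score, -torch.ones(8))  # sigmoid(-10) ~ 0, loss ~  0
assert pos.item() < neg.item()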
BaseFactorizationMachine
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/5v/c5vqsotjiyyydjenxs3ttprtusysjuigcmpcuavw4a5cbsh4movc.py # Topologically Sorted Source Nodes: [sum_1, square_of_sum, pow_2, sum_of_square, output, output_1, output_2], Original ATen: [aten.sum, aten.pow, aten.sub, aten.mul] # Source node to ATen node mapping: # output => sub # output_1 => sum_3 # output_2 => mul # pow_2 => pow_2 # square_of_sum => pow_1 # sum_1 => sum_1 # sum_of_square => sum_2 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1]), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 2), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1]), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, %sum_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub, [1], True), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 0.5), kwargs = {}) triton_poi_fused_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp16 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask) tmp21 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask) tmp33 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask) tmp34 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask) tmp36 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask) tmp38 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask) tmp50 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask) tmp51 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask) tmp53 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask) tmp55 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp0 * tmp0 tmp9 = tmp1 * tmp1 tmp10 = tmp8 + tmp9 tmp11 = tmp3 * tmp3 tmp12 = tmp10 + tmp11 tmp13 = tmp5 * tmp5 tmp14 = tmp12 + tmp13 tmp15 = tmp7 - tmp14 tmp18 = tmp16 + tmp17 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp16 * tmp16 tmp25 = tmp17 * tmp17 tmp26 = tmp24 + tmp25 tmp27 = tmp19 * tmp19 tmp28 = tmp26 + tmp27 tmp29 = tmp21 * tmp21 tmp30 = tmp28 + tmp29 tmp31 = tmp23 - tmp30 tmp32 = tmp15 + tmp31 tmp35 = tmp33 + tmp34 tmp37 = tmp35 + tmp36 tmp39 = tmp37 + tmp38 tmp40 = tmp39 * tmp39 tmp41 = tmp33 * tmp33 tmp42 = tmp34 * tmp34 tmp43 = tmp41 + tmp42 tmp44 = tmp36 * tmp36 tmp45 = tmp43 + tmp44 tmp46 = tmp38 * tmp38 tmp47 = tmp45 + tmp46 tmp48 = tmp40 - tmp47 tmp49 = tmp32 + tmp48 tmp52 = tmp50 + tmp51 tmp54 = tmp52 + tmp53 tmp56 = tmp54 + tmp55 tmp57 = tmp56 * tmp56 tmp58 = tmp50 * tmp50 tmp59 = tmp51 * tmp51 tmp60 = tmp58 + tmp59 tmp61 = tmp53 * tmp53 tmp62 = tmp60 + tmp61 tmp63 = tmp55 * tmp55 tmp64 = tmp62 + tmp63 tmp65 = tmp57 - tmp64 tmp66 = tmp49 + tmp65 tmp67 = 0.5 tmp68 = tmp66 * tmp67 tl.store(in_out_ptr0 + (x2), tmp68, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [sum_1, square_of_sum, pow_2, sum_of_square, output, output_1, output_2], Original ATen: [aten.sum, aten.pow, aten.sub, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0.run(buf1, arg0_1, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import 
rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class BaseFactorizationMachine(nn.Module): """Calculate FM result over the embeddings Args: reduce_sum: bool, whether to sum the result, default is True. Input: input_x: tensor, A 3D tensor with shape:``(batch_size,field_size,embed_dim)``. Output output: tensor, A 3D tensor with shape: ``(batch_size,1)`` or ``(batch_size, embed_dim)``. """ def __init__(self, reduce_sum=True): super(BaseFactorizationMachine, self).__init__() self.reduce_sum = reduce_sum def forward(self, input_x): square_of_sum = torch.sum(input_x, dim=1) ** 2 sum_of_square = torch.sum(input_x ** 2, dim=1) output = square_of_sum - sum_of_square if self.reduce_sum: output = torch.sum(output, dim=1, keepdim=True) output = 0.5 * output return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp16 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask) tmp21 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask) tmp33 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask) tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask) tmp36 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask) tmp38 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask) tmp50 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask) tmp51 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask) tmp53 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask) tmp55 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp0 * tmp0 tmp9 = tmp1 * tmp1 tmp10 = tmp8 + tmp9 tmp11 = tmp3 * tmp3 tmp12 = tmp10 + tmp11 tmp13 = tmp5 * tmp5 tmp14 = tmp12 + tmp13 tmp15 = tmp7 - tmp14 tmp18 = tmp16 + tmp17 tmp20 = tmp18 + tmp19 tmp22 = tmp20 + tmp21 tmp23 = tmp22 * tmp22 tmp24 = tmp16 * tmp16 tmp25 = tmp17 * tmp17 tmp26 = tmp24 + tmp25 tmp27 = tmp19 * tmp19 tmp28 = tmp26 + tmp27 tmp29 = tmp21 * tmp21 tmp30 = tmp28 + tmp29 tmp31 = tmp23 - tmp30 tmp32 = tmp15 + tmp31 tmp35 = tmp33 + tmp34 tmp37 = tmp35 + tmp36 tmp39 = tmp37 + tmp38 tmp40 = tmp39 * tmp39 tmp41 = tmp33 * tmp33 tmp42 = tmp34 * tmp34 tmp43 = tmp41 + tmp42 tmp44 = tmp36 * tmp36 tmp45 = tmp43 + tmp44 tmp46 = tmp38 * tmp38 tmp47 = tmp45 + tmp46 tmp48 = tmp40 - tmp47 tmp49 = tmp32 + tmp48 tmp52 = tmp50 + tmp51 tmp54 = tmp52 + tmp53 tmp56 = tmp54 + tmp55 tmp57 = tmp56 * tmp56 tmp58 = tmp50 * tmp50 tmp59 = tmp51 * tmp51 tmp60 = tmp58 + tmp59 tmp61 = tmp53 * tmp53 tmp62 = tmp60 + tmp61 tmp63 = tmp55 * tmp55 tmp64 = tmp62 + tmp63 tmp65 = tmp57 - tmp64 tmp66 = tmp49 + tmp65 tmp67 = 0.5 tmp68 = tmp66 * tmp67 tl.store(in_out_ptr0 + x2, tmp68, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_pow_sub_sum_0[grid(16)](buf1, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf1, class BaseFactorizationMachineNew(nn.Module): """Calculate FM result over the embeddings Args: reduce_sum: bool, whether to sum the result, default is True. Input: input_x: tensor, A 3D tensor with shape:``(batch_size,field_size,embed_dim)``. Output output: tensor, A 3D tensor with shape: ``(batch_size,1)`` or ``(batch_size, embed_dim)``. 
""" def __init__(self, reduce_sum=True): super(BaseFactorizationMachineNew, self).__init__() self.reduce_sum = reduce_sum def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Ahren09/RecBole
BaseFactorizationMachine
false
1923
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch import torch.nn as nn class Model(nn.Module): """Calculate FM result over the embeddings Args: reduce_sum: bool, whether to sum the result, default is True. Input: input_x: tensor, A 3D tensor with shape:``(batch_size,field_size,embed_dim)``. Output output: tensor, A 3D tensor with shape: ``(batch_size,1)`` or ``(batch_size, embed_dim)``. """ def __init__(self, reduce_sum=True): super().__init__() self.reduce_sum = reduce_sum def forward(self, input_x): square_of_sum = torch.sum(input_x, dim=1) ** 2 sum_of_square = torch.sum(input_x ** 2, dim=1) output = square_of_sum - sum_of_square if self.reduce_sum: output = torch.sum(output, dim=1, keepdim=True) output = 0.5 * output return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
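The kernel exploits the classic factorization-machine identity sum_{i<j} v_i * v_j = 0.5 * ((sum_i v_i)^2 - sum_i v_i^2) (elementwise over the embedding axis), which replaces the O(n^2) pairwise sum with two O(n) reductions; a quick numeric sketch of that identity:

import torch

x = torch.rand(2, 3, 4)  # (batch, field, embed)
# explicit pairwise elementwise products over pairs (0,1), (0,2), (1,2)
pairwise = sum(x[:, i] * x[:, j] for i in range(3) for j in range(i + 1, 3))
# the closed form the docstring and kernel both use
closed_form = 0.5 * (x.sum(dim=1) ** 2 - (x ** 2).sum(dim=1))
assert torch.allclose(pairwise, closed_form, atol=1e-06)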
InnerProductLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/cw/ccwvutm6zl37xd5gsiqvlmjxykaqval6ja4w7xuw5xctwfn5antt.py # Topologically Sorted Source Nodes: [p, q, inner_product, sum_1], Original ATen: [aten.index, aten.mul, aten.sum] # Source node to ATen node mapping: # inner_product => mul # p => index # q => index_1 # sum_1 => sum_1 # Graph fragment: # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy]), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy_1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %index_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) triton_poi_fused_index_mul_sum_0 = async_compile.triton('triton_poi_fused_index_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 6 x0 = xindex % 4 x2 = (xindex // 24) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + ((4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp18 = tl.where(tmp6, tmp5, tmp1) tmp19 = tl.where(tmp4, tmp3, tmp18) tmp20 = tl.where(tmp13, tmp1, tmp1) tmp21 = tl.where(tmp11, tmp5, tmp20) tmp22 = tl.where(tmp2, tmp19, tmp21) tmp23 = tl.load(in_ptr0 + ((4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp24 = tmp17 * tmp23 tmp25 = tl.load(in_ptr0 + (1 + (4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr0 + (1 + (4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp27 = tmp25 * tmp26 tmp28 = tmp24 + tmp27 tmp29 = tl.load(in_ptr0 + (2 + (4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (2 + (4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp33 = tl.load(in_ptr0 + (3 + (4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + (3 + (4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tl.store(out_ptr0 + (x3), tmp36, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p, q, inner_product, sum_1], Original ATen: [aten.index, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_index_mul_sum_0.run(arg0_1, buf0, 96, grid=grid(96), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class InnerProductLayer(nn.Module):
    """InnerProduct Layer used in PNN that computes the element-wise
    product or inner product between feature vectors.
    """

    def __init__(self, num_feature_field, device):
        """
        Args:
            num_feature_field(int) :number of feature fields.
            device(torch.device) : device object of the model.
        """
        super(InnerProductLayer, self).__init__()
        self.num_feature_field = num_feature_field

    def forward(self, feat_emb):
        """
        Args:
            feat_emb(torch.FloatTensor) :3D tensor with shape: [batch_size,num_pairs,embedding_size].

        Returns:
            inner_product(torch.FloatTensor): The inner product of input tensor. shape of [batch_size, num_pairs]
        """
        row = []
        col = []
        for i in range(self.num_feature_field - 1):
            for j in range(i + 1, self.num_feature_field):
                row.append(i)
                col.append(j)
        p = feat_emb[:, row]
        q = feat_emb[:, col]
        inner_product = p * q
        return inner_product.sum(dim=-1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_feature_field': 4, 'device': 0}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 96
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 6
    x0 = xindex % 4
    x2 = xindex // 24
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 3, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 2, tl.int64)
    tmp6 = tmp0 < tmp5
    tmp7 = tl.full([1], 0, tl.int64)
    tmp8 = tl.where(tmp6, tmp7, tmp7)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tl.full([1], 4, tl.int64)
    tmp11 = tmp0 < tmp10
    tmp12 = tl.full([1], 5, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tl.where(tmp13, tmp3, tmp5)
    tmp15 = tl.where(tmp11, tmp3, tmp14)
    tmp16 = tl.where(tmp2, tmp9, tmp15)
    tmp17 = tl.load(in_ptr0 + (4 * x0 + 16 * tmp16 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp18 = tl.where(tmp6, tmp5, tmp1)
    tmp19 = tl.where(tmp4, tmp3, tmp18)
    tmp20 = tl.where(tmp13, tmp1, tmp1)
    tmp21 = tl.where(tmp11, tmp5, tmp20)
    tmp22 = tl.where(tmp2, tmp19, tmp21)
    tmp23 = tl.load(in_ptr0 + (4 * x0 + 16 * tmp22 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp24 = tmp17 * tmp23
    tmp25 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp27 = tmp25 * tmp26
    tmp28 = tmp24 + tmp27
    tmp29 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp31 = tmp29 * tmp30
    tmp32 = tmp28 + tmp31
    tmp33 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask,
        eviction_policy='evict_last')
    tmp35 = tmp33 * tmp34
    tmp36 = tmp32 + tmp35
    tl.store(out_ptr0 + x3, tmp36, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_index_mul_sum_0[grid(96)](arg0_1, buf0, 96,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class InnerProductLayerNew(nn.Module):
    """InnerProduct Layer used in PNN that computes the element-wise
    product or inner product between feature vectors.
    """

    def __init__(self, num_feature_field, device):
        """
        Args:
            num_feature_field(int) :number of feature fields.
            device(torch.device) : device object of the model.
        """
        super(InnerProductLayerNew, self).__init__()
        self.num_feature_field = num_feature_field

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Ahren09/RecBole
InnerProductLayer
false
1924
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch
import torch.nn as nn


class Model(nn.Module):
    """InnerProduct Layer used in PNN that computes the element-wise
    product or inner product between feature vectors.
    """

    def __init__(self, num_feature_field, device):
        """
        Args:
            num_feature_field(int) :number of feature fields.
            device(torch.device) : device object of the model.
        """
        super().__init__()
        self.num_feature_field = num_feature_field

    def forward(self, feat_emb):
        """
        Args:
            feat_emb(torch.FloatTensor) :3D tensor with shape: [batch_size,num_pairs,embedding_size].

        Returns:
            inner_product(torch.FloatTensor): The inner product of input tensor. shape of [batch_size, num_pairs]
        """
        row = []
        col = []
        for i in range(self.num_feature_field - 1):
            for j in range(i + 1, self.num_feature_field):
                row.append(i)
                col.append(j)
        p = feat_emb[:, row]
        q = feat_emb[:, col]
        inner_product = p * q
        return inner_product.sum(dim=-1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 0]
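A small sketch (an assumption; Model is the eager reference above) that the fused index/mul/sum kernel computes the inner product of e_i and e_j for every feature pair i < j along the last dimension:

import torch

layer = Model(num_feature_field=4, device=0)
x = torch.rand(4, 4, 4, 4)
out = layer(x)                        # shape (4, 6, 4) for this 4D input
pairs = [(i, j) for i in range(3) for j in range(i + 1, 4)]
manual = torch.stack([(x[:, i] * x[:, j]).sum(-1) for i, j in pairs], dim=1)
assert torch.allclose(out, manual)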
InnerProductLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/2q/c2qgjmxnbjif6ni7qy2qblvtozrwd2f32jo4hex3rdtryfymbyul.py # Topologically Sorted Source Nodes: [mul, pos_score, neg, softplus, mul_1, neg_score, softplus_1, add, mean], Original ATen: [aten.mul, aten.sum, aten.neg, aten.softplus, aten.add, aten.mean] # Source node to ATen node mapping: # add => add # mean => mean # mul => mul # mul_1 => mul_1 # neg => neg # neg_score => sum_2 # pos_score => sum_1 # softplus => exp, gt, log1p, where # softplus_1 => exp_1, gt_1, log1p_1, where_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %neg : [num_users=3] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%neg, 20), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %neg, %log1p), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg2_1), kwargs = {}) # %sum_2 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sum_2, 20), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sum_2,), kwargs = {}) # %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %sum_2, %log1p_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %where_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add,), kwargs = {}) triton_per_fused_add_mean_mul_neg_softplus_sum_0 = 
async_compile.triton('triton_per_fused_add_mean_mul_neg_softplus_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_neg_softplus_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_neg_softplus_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp4 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp7 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp8 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp11 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp12 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp16 = tl.load(in_ptr2 + (r0 + (64*r1)), None) tmp18 = tl.load(in_ptr2 + (16 + r0 + (64*r1)), None) tmp21 = tl.load(in_ptr2 + (32 + r0 + (64*r1)), None) tmp24 = tl.load(in_ptr2 + (48 + r0 + (64*r1)), None) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tmp15 = -tmp14 tmp17 = tmp0 * tmp16 tmp19 = tmp3 * tmp18 tmp20 = tmp17 + tmp19 tmp22 = tmp7 * tmp21 tmp23 = tmp20 + tmp22 tmp25 = tmp11 * tmp24 tmp26 = tmp23 + tmp25 tmp27 = 20.0 tmp28 = tmp15 > tmp27 tmp29 = tl_math.exp(tmp15) tmp30 = libdevice.log1p(tmp29) tmp31 = tl.where(tmp28, tmp15, tmp30) tmp32 = tmp26 > tmp27 tmp33 = tl_math.exp(tmp26) tmp34 = libdevice.log1p(tmp33) tmp35 = tl.where(tmp32, tmp26, tmp34) tmp36 = tmp31 + tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = tl.sum(tmp37, 1)[:, None] tmp40 = 64.0 tmp41 = tmp39 / tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp41, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 
= args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [mul, pos_score, neg, softplus, mul_1, neg_score, softplus_1, add, mean], Original ATen: [aten.mul, aten.sum, aten.neg, aten.softplus, aten.add, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_mean_mul_neg_softplus_sum_0.run(buf3, arg1_1, arg0_1, arg2_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class InnerProductLoss(nn.Module):
    """This is the inner-product loss used in CFKG for optimization.
    """

    def __init__(self):
        super(InnerProductLoss, self).__init__()

    def forward(self, anchor, positive, negative):
        pos_score = torch.mul(anchor, positive).sum(dim=1)
        neg_score = torch.mul(anchor, negative).sum(dim=1)
        return (F.softplus(-pos_score) + F.softplus(neg_score)).mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_mean_mul_neg_softplus_sum_0(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp4 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp7 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp8 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp11 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp12 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    tmp16 = tl.load(in_ptr2 + (r0 + 64 * r1), None)
    tmp18 = tl.load(in_ptr2 + (16 + r0 + 64 * r1), None)
    tmp21 = tl.load(in_ptr2 + (32 + r0 + 64 * r1), None)
    tmp24 = tl.load(in_ptr2 + (48 + r0 + 64 * r1), None)
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = -tmp14
    tmp17 = tmp0 * tmp16
    tmp19 = tmp3 * tmp18
    tmp20 = tmp17 + tmp19
    tmp22 = tmp7 * tmp21
    tmp23 = tmp20 + tmp22
    tmp25 = tmp11 * tmp24
    tmp26 = tmp23 + tmp25
    tmp27 = 20.0
    tmp28 = tmp15 > tmp27
    tmp29 = tl_math.exp(tmp15)
    tmp30 = libdevice.log1p(tmp29)
    tmp31 = tl.where(tmp28, tmp15, tmp30)
    tmp32 = tmp26 > tmp27
    tmp33 = tl_math.exp(tmp26)
    tmp34 = libdevice.log1p(tmp33)
    tmp35 = tl.where(tmp32, tmp26, tmp34)
    tmp36 = tmp31 + tmp35
    tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
    tmp39 = tl.sum(tmp37, 1)[:, None]
    tmp40 = 64.0
    tmp41 = tmp39 / tmp40
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp41, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf2
        del buf2
        get_raw_stream(0)
        triton_per_fused_add_mean_mul_neg_softplus_sum_0[grid(1)](buf3,
            arg1_1, arg0_1, arg2_1, 1, 64, XBLOCK=1, num_warps=2,
            num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf3,


class InnerProductLossNew(nn.Module):
    """This is the inner-product loss used in CFKG for optimization.
    """

    def __init__(self):
        super(InnerProductLossNew, self).__init__()

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
Ahren09/RecBole
InnerProductLoss
false
1,925
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):
    """This is the inner-product loss used in CFKG for optimization.
    """

    def __init__(self):
        super().__init__()

    def forward(self, anchor, positive, negative):
        pos_score = torch.mul(anchor, positive).sum(dim=1)
        neg_score = torch.mul(anchor, negative).sum(dim=1)
        return (F.softplus(-pos_score) + F.softplus(neg_score)).mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
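An eager-mode sketch of what the fused kernel above computes (an illustrative addition, not part of the source entry): softplus(-pos_score) + softplus(neg_score), averaged over the scores left after sum(dim=1) on the [4, 4, 4, 4] test inputs.

import torch
import torch.nn.functional as F

anchor, positive, negative = (torch.rand(4, 4, 4, 4) for _ in range(3))
pos_score = (anchor * positive).sum(dim=1)  # [4, 4, 4]
neg_score = (anchor * negative).sum(dim=1)
# Mean over the 4 * 4 * 4 = 64 remaining elements, matching rnumel = 64
# and the division by 64.0 in the Triton kernel.
loss = (F.softplus(-pos_score) + F.softplus(neg_score)).mean()
assert loss.dim() == 0  # scalar loss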
PatchEmbed
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/m6/cm6jnv75vls3u2vembquh4idbfd2lwrvejdtj6zsjsck6k2du72b.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1536 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (48*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel 
path: runs/run_shard_7/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/jo/cjo75cecxravp7hclcdm4uffech72iwfhimwsvxma2ng2mjrhiww.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048, 256], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 2048 xnumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 512 y1 = (yindex // 512) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (512*x2) + (131072*y1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (256*y3)), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (512, 3, 4, 4), (48, 16, 4, 1)) assert_size_stride(primals_2, (512, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((512, 3, 4, 4), (48, 1, 12, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 1536, 16, grid=grid(1536, 16), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, buf0, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 512, 16, 16), (131072, 1, 8192, 512)) buf3 = empty_strided_cuda((4, 512, 16, 16), (131072, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf2, primals_2, buf3, 2048, 256, grid=grid(2048, 256), stream=stream0) del buf2 del primals_2 return (reinterpret_tensor(buf3, (4, 256, 512), (131072, 1, 256), 0), buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((512, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class PatchEmbed(nn.Module):

    def __init__(self, img_size, patch_size, in_c=3, embed_dim=512):
        super(PatchEmbed, self).__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.n_patches = (img_size // patch_size) ** 2
        self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size,
            stride=patch_size)

    def forward(self, x):
        x = self.proj(x)
        x = x.flatten(2)
        x = x.transpose(1, 2)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {'img_size': 4, 'patch_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 1536
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)


@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 256
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 512
    y1 = yindex // 512
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 131072 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 256 * y3), tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (512, 3, 4, 4), (48, 16, 4, 1))
    assert_size_stride(primals_2, (512,), (1,))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((512, 3, 4, 4), (48, 1, 12, 3),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(1536, 16)](primals_1, buf0, 1536, 16,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3),
            torch.float32)
        triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
            XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_3
        buf2 = extern_kernels.convolution(buf1, buf0, stride=(4, 4),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 512, 16, 16), (131072, 1, 8192, 512))
        buf3 = empty_strided_cuda((4, 512, 16, 16), (131072, 256, 16, 1),
            torch.float32)
        triton_poi_fused_convolution_2[grid(2048, 256)](buf2, primals_2,
            buf3, 2048, 256, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del buf2
        del primals_2
    return reinterpret_tensor(buf3, (4, 256, 512), (131072, 1, 256), 0
        ), buf0, buf1


class PatchEmbedNew(nn.Module):

    def __init__(self, img_size, patch_size, in_c=3, embed_dim=512):
        super(PatchEmbedNew, self).__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.n_patches = (img_size // patch_size) ** 2
        self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size,
            stride=patch_size)

    def forward(self, input_0):
        primals_1 = self.proj.weight
        primals_2 = self.proj.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
AgamChopra/WGAN-GP
PatchEmbed
false
1,926
[ "MIT" ]
0
cbe15f4d2ef2ebaef477524103cbda0741098186
https://github.com/AgamChopra/WGAN-GP/tree/cbe15f4d2ef2ebaef477524103cbda0741098186
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, img_size, patch_size, in_c=3, embed_dim=512):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.n_patches = (img_size // patch_size) ** 2
        self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size,
            stride=patch_size)

    def forward(self, x):
        x = self.proj(x)
        x = x.flatten(2)
        x = x.transpose(1, 2)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [4, 4]
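A shape sketch for the module above (an illustrative addition; it assumes img_size=64 so the n_patches attribute matches the 64x64 test input, whereas get_init_inputs uses img_size=4): 4x4 patches over a 64x64 image give (64 // 4) ** 2 = 256 tokens of width embed_dim = 512, the (4, 256, 512) view returned by call() above.

import torch

patch_embed = Model(img_size=64, patch_size=4)  # in_c=3, embed_dim=512 defaults
x = torch.rand(4, 3, 64, 64)
out = patch_embed(x)
assert out.shape == (4, 256, 512)  # [batch, n_patches, embed_dim]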
MyLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/fl/cflw6zjzdk2wqtau7m6nsei5vavjfijzxhb37zaa3xp4yxpw5yb2.py # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 1), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically 
Sorted Source Nodes: [bias], Original ATen: [aten.mul] # Source node to ATen node mapping: # bias => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 1), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [bias], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_1, buf1, 4, grid=grid(4), stream=stream0) del primals_1 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [bias, linear], Original ATen: [aten.mul, aten.addmm] extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf0 del buf1 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', 
dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class MyLinear(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5,
        use_wscale=False, lrmul=1, bias=True):
        super().__init__()
        he_std = gain * input_size ** -0.5
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_mul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_mul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_size,
            input_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, x):
        bias = self.bias
        if bias is not None:
            bias = bias * self.b_mul
        return F.linear(x, self.weight * self.w_mul, bias)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'output_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_mul_1[grid(4)](primals_1, buf1, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del primals_1
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(buf1, reinterpret_tensor(primals_3, (64, 4),
            (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del buf0
        del buf1
    return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)


class MyLinearNew(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5,
        use_wscale=False, lrmul=1, bias=True):
        super().__init__()
        he_std = gain * input_size ** -0.5
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_mul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_mul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_size,
            input_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, input_0):
        primals_2 = self.weight
        primals_1 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
AleksiKnuutila/ganspace
MyLinear
false
1,927
[ "Apache-2.0" ]
0
23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
https://github.com/AleksiKnuutila/ganspace/tree/23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5,
        use_wscale=False, lrmul=1, bias=True):
        super().__init__()
        he_std = gain * input_size ** -0.5
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_mul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_mul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_size,
            input_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, x):
        bias = self.bias
        if bias is not None:
            bias = bias * self.b_mul
        return F.linear(x, self.weight * self.w_mul, bias)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
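A small sketch of the equalized-learning-rate bookkeeping above (an illustrative addition, not from the source repo): with the defaults use_wscale=False and lrmul=1, both runtime multipliers are 1.0, which is why the two mul kernels in the compiled code simply scale by 1.0.

import math

gain, input_size, lrmul = 2 ** 0.5, 4, 1
he_std = gain * input_size ** -0.5       # He initialization std, ~0.7071 here
init_std, w_mul = he_std / lrmul, lrmul  # default branch: scale baked into init
# use_wscale=True swaps the roles: init_std = 1 / lrmul and w_mul = he_std * lrmul,
# so the He scaling is applied at runtime instead of at initialization.
assert math.isclose(he_std, gain / math.sqrt(input_size))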
AttLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/36/c36xldnuc3t465xlrbmm4yvftzdrqqjr5avgkhmu7tkzbkcsyqp3.py # Topologically Sorted Source Nodes: [att_signal_1, att_signal_2, att_signal_3], Original ATen: [aten.relu, aten.mul, aten.sum] # Source node to ATen node mapping: # att_signal_1 => relu # att_signal_2 => mul # att_signal_3 => sum_1 # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %primals_3), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2]), kwargs = {}) triton_poi_fused_mul_relu_sum_0 = async_compile.triton('triton_poi_fused_mul_relu_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_relu_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp13 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 * tmp3 tmp6 = triton_helpers.maximum(tmp1, tmp5) tmp7 = tmp6 * tmp3 tmp8 = tmp4 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp10 * tmp3 tmp12 = tmp8 + tmp11 tmp14 = triton_helpers.maximum(tmp1, tmp13) tmp15 = tmp14 * tmp3 tmp16 = tmp12 + tmp15 tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/dm/cdmkcxuzpnailvibeivaikqdr4zvashgzwju7qijhq5aizlo3aor.py # Topologically Sorted Source Nodes: [att_signal_4], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_signal_4 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sum_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') 
tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/kt/cktghousutx6xui2sl2rvevzmb7gkacvfhntjq5n2xzeu7v57oz6.py # Topologically Sorted Source Nodes: [att_signal_4], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_signal_4 => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_signal], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del 
primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_signal_1, att_signal_2, att_signal_3], Original ATen: [aten.relu, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_mul_relu_sum_0.run(buf0, primals_3, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_signal_4], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [att_signal_4], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0) del buf2 return (buf3, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as fn


class AttLayer(nn.Module):
    """Calculate the attention signal (weight) according to the input tensor.

    Args:
        infeatures (torch.FloatTensor): A 3D input tensor with shape of [batch_size, M, embed_dim].

    Returns:
        torch.FloatTensor: Attention weight of the input, with shape [batch_size, M].
    """

    def __init__(self, in_dim, att_dim):
        super(AttLayer, self).__init__()
        self.in_dim = in_dim
        self.att_dim = att_dim
        self.w = torch.nn.Linear(in_features=in_dim, out_features=att_dim,
            bias=False)
        self.h = nn.Parameter(torch.randn(att_dim), requires_grad=True)

    def forward(self, infeatures):
        att_signal = self.w(infeatures)
        att_signal = fn.relu(att_signal)
        att_signal = torch.mul(att_signal, self.h)
        att_signal = torch.sum(att_signal, dim=2)
        att_signal = fn.softmax(att_signal, dim=1)
        return att_signal


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4, 'att_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_relu_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp9 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp13 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = tmp2 * tmp3
    tmp6 = triton_helpers.maximum(tmp1, tmp5)
    tmp7 = tmp6 * tmp3
    tmp8 = tmp4 + tmp7
    tmp10 = triton_helpers.maximum(tmp1, tmp9)
    tmp11 = tmp10 * tmp3
    tmp12 = tmp8 + tmp11
    tmp14 = triton_helpers.maximum(tmp1, tmp13)
    tmp15 = tmp14 * tmp3
    tmp16 = tmp12 + tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_relu_sum_0[grid(64)](buf0, primals_3, buf1,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = buf1
        del buf1
        triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf2
    return buf3, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
        ), buf0, buf3


class AttLayerNew(nn.Module):
    """Calculate the attention signal (weight) according to the input tensor.

    Args:
        infeatures (torch.FloatTensor): A 3D input tensor with shape of [batch_size, M, embed_dim].

    Returns:
        torch.FloatTensor: Attention weight of the input, with shape [batch_size, M].
    """

    def __init__(self, in_dim, att_dim):
        super(AttLayerNew, self).__init__()
        self.in_dim = in_dim
        self.att_dim = att_dim
        self.w = torch.nn.Linear(in_features=in_dim, out_features=att_dim,
            bias=False)
        self.h = nn.Parameter(torch.randn(att_dim), requires_grad=True)

    def forward(self, input_0):
        primals_3 = self.h
        primals_1 = self.w.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Ahren09/RecBole
AttLayer
false
1,928
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch
import torch.nn as nn
import torch.nn.functional as fn


class Model(nn.Module):
    """Calculate the attention signal (weight) according to the input tensor.

    Args:
        infeatures (torch.FloatTensor): A 3D input tensor with shape of [batch_size, M, embed_dim].

    Returns:
        torch.FloatTensor: Attention weight of the input, with shape [batch_size, M].
    """

    def __init__(self, in_dim, att_dim):
        super().__init__()
        self.in_dim = in_dim
        self.att_dim = att_dim
        self.w = torch.nn.Linear(in_features=in_dim, out_features=att_dim,
            bias=False)
        self.h = nn.Parameter(torch.randn(att_dim), requires_grad=True)

    def forward(self, infeatures):
        att_signal = self.w(infeatures)
        att_signal = fn.relu(att_signal)
        att_signal = torch.mul(att_signal, self.h)
        att_signal = torch.sum(att_signal, dim=2)
        att_signal = fn.softmax(att_signal, dim=1)
        return att_signal


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
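A sanity sketch for the module above (an illustrative addition, not from the source repo): because the forward pass ends with softmax over dim=1, the returned weights sum to 1 along that axis, a property the split exp/normalize kernels in the compiled code preserve.

import torch

att = Model(in_dim=4, att_dim=4)
weights = att(torch.rand(4, 4, 4, 4))  # relu -> mul by h -> sum(dim=2) -> softmax(dim=1)
assert torch.allclose(weights.sum(dim=1), torch.ones(4, 4))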
AGRUCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/by/cbypqeb7lfdlbth5j2ww7h2bluyiqc2nrbnc76btfynxntusq5wb.py # Topologically Sorted Source Nodes: [add, reset_gate, mul, add_1, new_state, sub, mul_1, mul_2, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub, aten.tanh_backward] # Source node to ATen node mapping: # add => add # add_1 => add_1 # hy => add_2 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # new_state => tanh # reset_gate => sigmoid # sub => sub # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %getitem_3), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %getitem_5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, %mul), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_6), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %tanh), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_4), kwargs = {}) triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (12*x1)), xmask) tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + (x2), xmask) tmp11 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask) tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (8 + x0 + (12*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp10 = tmp8 * tmp9 tmp13 = tmp11 + tmp12 tmp15 = tmp5 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.tanh(tmp16) tmp18 = tmp6 * tmp17 tmp19 = tmp10 + tmp18 tmp20 = tmp17 * tmp17 tmp21 = tmp7 - tmp20 tl.store(out_ptr0 + (x2), tmp5, xmask) tl.store(out_ptr1 + (x2), tmp19, xmask) tl.store(out_ptr2 + (x2), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12, ), (1, )) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12, ), (1, )) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [gh], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
# Topologically Sorted Source Nodes: [add, reset_gate, mul, add_1, new_state, sub, mul_1, mul_2, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub, aten.tanh_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0.run(buf0, primals_2, buf1, primals_7, primals_6, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0) del buf0 del primals_2 return (buf3, primals_3, primals_6, primals_7, reinterpret_tensor(buf1, (16, 4), (12, 1), 8), buf2, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class AGRUCell(nn.Module): ' Attention based GRU (AGRU). AGRU uses the attention score to replace the update gate of GRU, and changes the\n hidden state directly.\n\n Formally:\n .. math:: {h}_{t}^{\\prime}=\\left(1-a_{t}\\right) * {h}_{t-1}^{\\prime}+a_{t} * \\tilde{{h}}_{t}^{\\prime}\n\n :math:`{h}_{t}^{\\prime}`, :math:`{h}_{t-1}^{\\prime}`, and\n :math:`\\tilde{{h}}_{t}^{\\prime}` are the hidden states of AGRU\n\n ' def __init__(self, input_size, hidden_size, bias=True): super(AGRUCell, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size)) self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size)) if self.bias: self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size)) self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size)) else: self.register_parameter('bias_ih', None) self.register_parameter('bias_hh', None) def forward(self, input, hidden_output, att_score): gi = F.linear(input, self.weight_ih, self.bias_ih) gh = F.linear(hidden_output, self.weight_hh, self.bias_hh) i_r, _i_u, i_h = gi.chunk(3, 1) h_r, _h_u, h_h = gh.chunk(3, 1) reset_gate = torch.sigmoid(i_r + h_r) new_state = torch.tanh(i_h + reset_gate * h_h) att_score = att_score.view(-1, 1) hy = (1 - att_score) * hidden_output + att_score * new_state return hy def get_inputs(): return [torch.rand([16, 4]), torch.rand([16, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
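Not part of the dataset record, but a minimal eager-mode sketch of how this entry is meant to be exercised, assuming only the AGRUCell, get_inputs, and get_init_inputs definitions above:

import torch

# Hypothetical driver: rebuild the module from the recorded init arguments
# and feed it the recorded inputs.
init_args, init_kwargs = get_init_inputs()   # [], {'input_size': 4, 'hidden_size': 4}
cell = AGRUCell(*init_args, **init_kwargs)
x, hidden, att = get_inputs()                # shapes [16, 4], [16, 4], [4, 4]
hy = cell(x, hidden, att)
assert hy.shape == (16, 4)                   # att_score is reshaped to [16, 1] inside forward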
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask) tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + x2, xmask) tmp11 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp12 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp10 = tmp8 * tmp9 tmp13 = tmp11 + tmp12 tmp15 = tmp5 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = libdevice.tanh(tmp16) tmp18 = tmp6 * tmp17 tmp19 = tmp10 + tmp18 tmp20 = tmp17 * tmp17 tmp21 = tmp7 - tmp20 tl.store(out_ptr0 + x2, tmp5, xmask) tl.store(out_ptr1 + x2, tmp19, xmask) tl.store(out_ptr2 + x2, tmp21, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (12, 4), (4, 1)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor( primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_tanh_backward_0[grid(64)]( buf0, primals_2, buf1, primals_7, primals_6, buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del primals_2 return buf3, primals_3, primals_6, primals_7, reinterpret_tensor(buf1, (16, 4), (12, 1), 8), buf2, buf4 class AGRUCellNew(nn.Module): ' Attention based GRU (AGRU). 
AGRU uses the attention score to replace the update gate of GRU, and changes the\n hidden state directly.\n\n Formally:\n .. math:: {h}_{t}^{\\prime}=\\left(1-a_{t}\\right) * {h}_{t-1}^{\\prime}+a_{t} * \\tilde{{h}}_{t}^{\\prime}\n\n :math:`{h}_{t}^{\\prime}`, :math:`{h}_{t-1}^{\\prime}`, and\n :math:`\\tilde{{h}}_{t}^{\\prime}` are the hidden states of AGRU\n\n ' def __init__(self, input_size, hidden_size, bias=True): super(AGRUCellNew, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size)) self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size)) if self.bias: self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size)) self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size)) else: self.register_parameter('bias_ih', None) self.register_parameter('bias_hh', None) def forward(self, input_0, input_1, input_2): primals_1 = self.weight_ih primals_4 = self.weight_hh primals_2 = self.bias_ih primals_5 = self.bias_hh primals_3 = input_0 primals_6 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
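A parity check one could run between the eager cell and the compiled wrapper (a sketch, assuming both classes above are in scope and a CUDA device is available; the parameter names match, so state_dict transfer is direct):

import torch

torch.manual_seed(0)
ref = AGRUCell(input_size=4, hidden_size=4).cuda()
opt = AGRUCellNew(input_size=4, hidden_size=4).cuda()
opt.load_state_dict(ref.state_dict())        # weight_ih/weight_hh/bias_ih/bias_hh line up
x, hidden, att = (t.cuda() for t in get_inputs())
with torch.no_grad():
    assert torch.allclose(ref(x, hidden, att), opt(x, hidden, att), atol=1e-5)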
Ahren09/RecBole
AGRUCell
false
1929
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch import torch.nn as nn import torch.nn.functional as F class Model(nn.Module): ' Attention based GRU (AGRU). AGRU uses the attention score to replace the update gate of GRU, and changes the\n hidden state directly.\n\n Formally:\n .. math:: {h}_{t}^{\\prime}=\\left(1-a_{t}\\right) * {h}_{t-1}^{\\prime}+a_{t} * \\tilde{{h}}_{t}^{\\prime}\n\n :math:`{h}_{t}^{\\prime}`, :math:`{h}_{t-1}^{\\prime}`, and\n :math:`\\tilde{{h}}_{t}^{\\prime}` are the hidden states of AGRU\n\n ' def __init__(self, input_size, hidden_size, bias=True): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.bias = bias self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size)) self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size)) if self.bias: self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size)) self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size)) else: self.register_parameter('bias_ih', None) self.register_parameter('bias_hh', None) def forward(self, input, hidden_output, att_score): gi = F.linear(input, self.weight_ih, self.bias_ih) gh = F.linear(hidden_output, self.weight_hh, self.bias_hh) i_r, _i_u, i_h = gi.chunk(3, 1) h_r, _h_u, h_h = gh.chunk(3, 1) reset_gate = torch.sigmoid(i_r + h_r) new_state = torch.tanh(i_h + reset_gate * h_h) att_score = att_score.view(-1, 1) hy = (1 - att_score) * hidden_output + att_score * new_state return hy def get_inputs(): return [torch.rand([16, 4]), torch.rand([16, 4]), torch.rand([4, 4])] def get_init_inputs(): return [4, 4]
PEG
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/gu/cguvq7dzmhomcchbxvk4mcgrc7aszgsh5ytmkbdn727ju3aina23.py # Topologically Sorted Source Nodes: [conv2d, add], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # add => add # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_3), kwargs = {}) triton_poi_fused_add_convolution_0 = async_compile.triton('triton_poi_fused_add_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, add], Original ATen: [aten.convolution, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_convolution_0.run(buf1, primals_2, primals_3, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class PEG(nn.Module): def __init__(self, dim, kernel_size=3): super().__init__() self.proj = Residual(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=dim, stride=1)) def forward(self, x): return self.proj(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
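A small eager sanity check for this record (a sketch, assuming only the PEG and get_inputs definitions above): the depthwise 3x3 convolution uses padding = kernel_size // 2, so spatial size is preserved and the residual add is shape-safe.

import torch

peg = PEG(dim=4)
x, = get_inputs()                            # [4, 4, 4, 4]
y = peg(x)
assert y.shape == x.shape                    # padding=1 keeps H and W, groups=dim keeps C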
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_convolution_0[grid(256)](buf1, primals_2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class PEGNew(nn.Module): def __init__(self, dim, kernel_size=3): super().__init__() self.proj = Residual(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=dim, stride=1)) def forward(self, input_0): primals_1 = self.proj.fn.weight primals_2 = self.proj.fn.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
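The compiled path runs the depthwise convolution via extern_kernels.convolution with bias=None and fuses the bias add and the residual add into the single pointwise kernel above; the same decomposition written in eager PyTorch (a sketch, assuming the PEG definition from this record):

import torch
import torch.nn.functional as F

peg = PEG(dim=4)
conv = peg.proj.fn                           # the depthwise Conv2d inside the Residual
x = torch.rand(4, 4, 4, 4)
# Convolution without bias, then bias add and residual add as one elementwise step,
# mirroring what the fused Triton kernel computes.
y_fused = F.conv2d(x, conv.weight, bias=None, padding=1, groups=4) \
    + conv.bias.view(1, -1, 1, 1) + x
assert torch.allclose(y_fused, peg(x), atol=1e-6)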
AlansBoyHeart/vit-pytorch
PEG
false
1930
[ "MIT" ]
0
1959adae0bdd7801475bba34d7d61bdc529b4616
https://github.com/AlansBoyHeart/vit-pytorch/tree/1959adae0bdd7801475bba34d7d61bdc529b4616
import torch import torch.nn as nn class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class Model(nn.Module): def __init__(self, dim, kernel_size=3): super().__init__() self.proj = Residual(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=dim, stride=1)) def forward(self, x): return self.proj(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
new_class
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/vq/cvq3k2pudyocnklorpznpn2x4uwcd75kitqtmbswq44pxwjzv5lf.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 
4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class new_class(nn.Module): def __init__(self): super(new_class, self).__init__() def forward(self, input): return input + 1 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class new_classNew(nn.Module): def __init__(self): super(new_classNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
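Even this one-op module can be checked the same way (a sketch; a CUDA device is assumed since the generated kernel is CUDA-only):

import torch

m = new_classNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(m(x), x + 1)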
AluminiumOxide/pytorch_base_-tutorial
new_class
false
1931
[ "Apache-2.0" ]
0
a6d3bea6070c7c774dcd7c55d94b0a1441548c8b
https://github.com/AluminiumOxide/pytorch_base_-tutorial/tree/a6d3bea6070c7c774dcd7c55d94b0a1441548c8b
import torch from torch import nn class Model(nn.Module): def __init__(self): super().__init__() def forward(self, input): return input + 1 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
Repeat_Explore_Mechanism
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/lo/cloa3pjph6m2k24ygelmr3bqrhzqbnqcl7unftyarinyai76sqh6.py # Topologically Sorted Source Nodes: [last_memory_2, add, output_ere], Original ATen: [aten.repeat, aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # last_memory_2 => repeat # output_ere => tanh # Graph fragment: # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [1, 4, 1]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %repeat), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_repeat_tanh_0 = async_compile.triton('triton_poi_fused_add_repeat_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_repeat_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_repeat_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 16) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py # Topologically Sorted Source Nodes: [alpha_are], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alpha_are => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py # Topologically Sorted Source Nodes: [alpha_are], Original ATen: [aten._softmax] # Source node to ATen node mapping: # alpha_are => div, sum_1 # Graph fragment: 
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/37/c37x3smg2vcipbhsjz546axeu2jge2hmqkrvktxkffkbchccjh4m.py # Topologically Sorted Source Nodes: [alpha_are_1, output_cre, output_cre_1], Original ATen: [aten.repeat, aten.mul, aten.sum] # Source node to ATen node mapping: # alpha_are_1 => repeat_1 # output_cre => mul # output_cre_1 => sum_2 # Graph fragment: # %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%div, [1, 1, 4]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%repeat_1, %primals_1), kwargs = {}) # %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_repeat_sum_3 = async_compile.triton('triton_poi_fused_mul_repeat_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_repeat_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_repeat_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask) tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask) tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/m4/cm4zqjee2djtqrpv2iq3k3x5qsq6cqxcvagimjcpqnohz4rdhbaa.py # Topologically Sorted Source Nodes: [repeat_explore_mechanism], Original ATen: [aten._softmax] # Source node to ATen node mapping: # repeat_explore_mechanism => amax_1, div_1, exp_1, sub_1, sum_3 # Graph fragment: # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mm_3, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mm_3, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_3), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 2) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + (x2), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (2, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [last_memory_2, add, output_ere], Original ATen: [aten.repeat, aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_repeat_tanh_0.run(buf2, buf1, 64, grid=grid(64), stream=stream0) buf3 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [output_ere_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [alpha_are], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [alpha_are], Original ATen: [aten._softmax] 
triton_poi_fused__softmax_2.run(buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [alpha_are_1, output_cre, output_cre_1], Original ATen: [aten.repeat, aten.mul, aten.sum] triton_poi_fused_mul_repeat_sum_3.run(buf5, primals_1, buf6, 16, grid=grid(16), stream=stream0) buf7 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [output_cre_2], Original ATen: [aten.mm] extern_kernels.mm(buf6, reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), out=buf7) buf8 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [repeat_explore_mechanism], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf7, buf8, 8, grid=grid(8), stream=stream0) del buf7 return (buf8, primals_1, primals_4, buf2, buf5, buf6, buf8, primals_6, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Repeat_Explore_Mechanism(nn.Module): def __init__(self, device, hidden_size, seq_len, dropout_prob): super(Repeat_Explore_Mechanism, self).__init__() self.dropout = nn.Dropout(dropout_prob) self.hidden_size = hidden_size self.device = device self.seq_len = seq_len self.Wre = nn.Linear(hidden_size, hidden_size, bias=False) self.Ure = nn.Linear(hidden_size, hidden_size, bias=False) self.tanh = nn.Tanh() self.Vre = nn.Linear(hidden_size, 1, bias=False) self.Wcre = nn.Linear(hidden_size, 2, bias=False) def forward(self, all_memory, last_memory): """ calculate the probability of Repeat and explore """ all_memory_values = all_memory all_memory = self.dropout(self.Ure(all_memory)) last_memory = self.dropout(self.Wre(last_memory)) last_memory = last_memory.unsqueeze(1) last_memory = last_memory.repeat(1, self.seq_len, 1) output_ere = self.tanh(all_memory + last_memory) output_ere = self.Vre(output_ere) alpha_are = nn.Softmax(dim=1)(output_ere) alpha_are = alpha_are.repeat(1, 1, self.hidden_size) output_cre = alpha_are * all_memory_values output_cre = output_cre.sum(dim=1) output_cre = self.Wcre(output_cre) repeat_explore_mechanism = nn.Softmax(dim=-1)(output_cre) return repeat_explore_mechanism def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'device': 0, 'hidden_size': 4, 'seq_len': 4, 'dropout_prob': 0.5}]
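A quick eager sanity check (a sketch; .eval() disables the dropout so the run is deterministic for fixed inputs): each output row is a two-way softmax over repeat vs. explore, so rows sum to 1.

import torch

mech = Repeat_Explore_Mechanism(device=0, hidden_size=4, seq_len=4, dropout_prob=0.5)
mech.eval()                                  # dropout off
all_mem, last_mem = get_inputs()             # [4, 4, 4] and [4, 4]
p = mech(all_mem, last_mem)                  # [4, 2]: P(repeat), P(explore)
assert torch.allclose(p.sum(dim=-1), torch.ones(4))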
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_repeat_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_repeat_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask) tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask) tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: 
tl.constexpr ): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (2, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_repeat_tanh_0[grid(64)](buf2, buf1, 64, XBLOCK =64, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0) del buf1 extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 1), 0) del buf3 triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused_mul_repeat_sum_3[grid(16)](buf5, primals_1, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.mm(buf6, reinterpret_tensor(primals_6, (4, 2), (1, 4 ), 0), out=buf7) buf8 = empty_strided_cuda((4, 2), (2, 1), torch.float32) triton_poi_fused__softmax_4[grid(8)](buf7, buf8, 8, XBLOCK=8, num_warps=1, num_stages=1) del buf7 return (buf8, primals_1, primals_4, buf2, buf5, buf6, buf8, primals_6, primals_5) class Repeat_Explore_MechanismNew(nn.Module): def __init__(self, device, hidden_size, seq_len, dropout_prob): super(Repeat_Explore_MechanismNew, self).__init__() self.dropout = nn.Dropout(dropout_prob) self.hidden_size = hidden_size self.device = device self.seq_len = seq_len self.Wre = nn.Linear(hidden_size, hidden_size, bias=False) self.Ure = nn.Linear(hidden_size, hidden_size, bias=False) self.tanh = nn.Tanh() self.Vre = nn.Linear(hidden_size, 1, bias=False) self.Wcre = nn.Linear(hidden_size, 2, bias=False) def forward(self, input_0, input_1): primals_2 = self.Wre.weight primals_3 = self.Ure.weight primals_5 = self.Vre.weight primals_6 = self.Wcre.weight primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
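The generated call() contains no dropout op, consistent with the module having been traced in eval mode; under that assumption the wrapper can be compared against the eager module directly (a sketch, CUDA assumed):

import torch

torch.manual_seed(0)
ref = Repeat_Explore_Mechanism(0, hidden_size=4, seq_len=4, dropout_prob=0.5).cuda().eval()
opt = Repeat_Explore_MechanismNew(0, hidden_size=4, seq_len=4, dropout_prob=0.5).cuda().eval()
opt.load_state_dict(ref.state_dict())        # Wre/Ure/Vre/Wcre weights line up
all_mem, last_mem = (t.cuda() for t in get_inputs())
with torch.no_grad():
    assert torch.allclose(ref(all_mem, last_mem), opt(all_mem, last_mem), atol=1e-5)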
Ahren09/RecBole
Repeat_Explore_Mechanism
false
1932
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, device, hidden_size, seq_len, dropout_prob): super().__init__() self.dropout = nn.Dropout(dropout_prob) self.hidden_size = hidden_size self.device = device self.seq_len = seq_len self.Wre = nn.Linear(hidden_size, hidden_size, bias=False) self.Ure = nn.Linear(hidden_size, hidden_size, bias=False) self.tanh = nn.Tanh() self.Vre = nn.Linear(hidden_size, 1, bias=False) self.Wcre = nn.Linear(hidden_size, 2, bias=False) def forward(self, all_memory, last_memory): """ calculate the probability of Repeat and explore """ all_memory_values = all_memory all_memory = self.dropout(self.Ure(all_memory)) last_memory = self.dropout(self.Wre(last_memory)) last_memory = last_memory.unsqueeze(1) last_memory = last_memory.repeat(1, self.seq_len, 1) output_ere = self.tanh(all_memory + last_memory) output_ere = self.Vre(output_ere) alpha_are = nn.Softmax(dim=1)(output_ere) alpha_are = alpha_are.repeat(1, 1, self.hidden_size) output_cre = alpha_are * all_memory_values output_cre = output_cre.sum(dim=1) output_cre = self.Wcre(output_cre) repeat_explore_mechanism = nn.Softmax(dim=-1)(output_cre) return repeat_explore_mechanism def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'device': 0, 'hidden_size': 4, 'seq_len': 4, 'dropout_prob': 0.5}]
Downsample
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/jw/cjw2limywkgcxcmakwde6yhejpgbx3f2uhb57pqab2yabyayyfay.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
#   x => constant_pad_nd
# Graph fragment:
#   %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 1, 0, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 5) % 5
    x0 = xindex % 5
    x2 = (xindex // 25)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 4, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = x0
    tmp4 = tmp3 < tmp1
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp5 & xmask, other=0.0)
    tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   x_1 => convolution
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 4) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
        stream0 = get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 400, grid=grid(400), stream=stream0)
        del primals_1
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
        del primals_3
    return (buf2, primals_2, buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class Downsample(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        if self.with_conv:
            pad = 0, 1, 0, 1
            x = torch.nn.functional.pad(x, pad, mode='constant', value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'with_conv': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 5 % 5
    x0 = xindex % 5
    x2 = xindex // 25
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 4, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = x0
    tmp4 = tmp3 < tmp1
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0)
    tl.store(out_ptr0 + x3, tmp6, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(400)](primals_1, buf0, 400, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0


class DownsampleNew(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
AkioHayakawa/SDEdit
Downsample
false
1933
[ "MIT" ]
0
54d793bc013ea99ae81c539bc559254fa8746e19
https://github.com/AkioHayakawa/SDEdit/tree/54d793bc013ea99ae81c539bc559254fa8746e19
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        if self.with_conv:
            pad = 0, 1, 0, 1
            x = torch.nn.functional.pad(x, pad, mode='constant', value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4]
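A brief shape check of my own (not from the source repo), illustrating that the asymmetric (0, 1, 0, 1) pad plus the stride-2 conv halves the spatial size, as does the avg-pool branch:

import torch

m = Model(in_channels=4, with_conv=True)
y = m(torch.rand(4, 4, 4, 4))
print(y.shape)  # torch.Size([4, 4, 2, 2]): pad 4x4 -> 5x5, then (5 - 3) // 2 + 1 = 2
m2 = Model(in_channels=4, with_conv=False)
print(m2(torch.rand(4, 4, 4, 4)).shape)  # also torch.Size([4, 4, 2, 2]) via avg_pool2d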
StyleMod
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/yg/cygzi6evso6kefobgrwgjcxh5qgbn7zouyf7to742xeh6bsflplb.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
#   mul_1 => mul_1
# Graph fragment:
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[32],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/2n/c2npkiljqvyo5boces3kqpxvha7qfatrulgz7qm2oitxblzqbabd.py
# Topologically Sorted Source Nodes: [add, mul_2, x], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
#   add => add
#   mul_2 => mul_2
#   x => add_1
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, 1.0), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %add), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %select_1), kwargs = {})
triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = (xindex // 16) % 4
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), None)
    tmp1 = tl.load(in_ptr1 + (x1 + (8*x2)), None, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (4 + x1 + (8*x2)), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last')
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tmp5 = tmp1 + tmp4
    tmp6 = tmp5 + tmp3
    tmp7 = tmp0 * tmp6
    tmp10 = tmp9 * tmp3
    tmp11 = tmp8 + tmp10
    tmp12 = tmp7 + tmp11
    tl.store(out_ptr0 + (x3), tmp12, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (8, ), (1, ))
    assert_size_stride(primals_2, (8, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
        stream0 = get_raw_stream(0)
        triton_poi_fused_mul_0.run(primals_2, buf0, 32, grid=grid(32), stream=stream0)
        del primals_2
        buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1)
        del buf0
        buf2 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add, mul_2, x], Original ATen: [aten.add, aten.mul]
        triton_poi_fused_add_mul_1.run(primals_4, buf1, primals_1, buf2, 4096, grid=grid(4096), stream=stream0)
        del buf1
        del primals_1
    return (buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((64, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class MyLinear(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale=False, lrmul=1, bias=True):
        super().__init__()
        he_std = gain * input_size ** -0.5
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_mul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_mul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, x):
        bias = self.bias
        if bias is not None:
            bias = bias * self.b_mul
        return F.linear(x, self.weight * self.w_mul, bias)


class StyleMod(nn.Module):

    def __init__(self, latent_size, channels, use_wscale):
        super(StyleMod, self).__init__()
        self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale=use_wscale)

    def forward(self, x, latent):
        style = self.lin(latent)
        shape = [-1, 2, x.size(1)] + (x.dim() - 2) * [1]
        style = style.view(shape)
        x = x * (style[:, 0] + 1.0) + style[:, 1]
        return x


def get_inputs():
    return [torch.rand([64, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'latent_size': 4, 'channels': 4, 'use_wscale': 1.0}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_add_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 4
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (x1 + 8 * x2), None, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (4 + x1 + 8 * x2), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (4 + x1), None, eviction_policy='evict_last')
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tmp5 = tmp1 + tmp4
    tmp6 = tmp5 + tmp3
    tmp7 = tmp0 * tmp6
    tmp10 = tmp9 * tmp3
    tmp11 = tmp8 + tmp10
    tmp12 = tmp7 + tmp11
    tl.store(out_ptr0 + x3, tmp12, None)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (8,), (1,))
    assert_size_stride(primals_2, (8, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (64, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(32)](primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del primals_2
        buf1 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1)
        del buf0
        buf2 = empty_strided_cuda((64, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_mul_1[grid(4096)](primals_4, buf1, primals_1, buf2, 4096, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del primals_1
    return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)


class MyLinear(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale=False, lrmul=1, bias=True):
        super().__init__()
        he_std = gain * input_size ** -0.5
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_mul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_mul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, x):
        bias = self.bias
        if bias is not None:
            bias = bias * self.b_mul
        return F.linear(x, self.weight * self.w_mul, bias)


class StyleModNew(nn.Module):

    def __init__(self, latent_size, channels, use_wscale):
        super(StyleModNew, self).__init__()
        self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale=use_wscale)

    def forward(self, input_0, input_1):
        primals_2 = self.lin.weight
        primals_1 = self.lin.bias
        primals_4 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
AleksiKnuutila/ganspace
StyleMod
false
1934
[ "Apache-2.0" ]
0
23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
https://github.com/AleksiKnuutila/ganspace/tree/23471a07c8b0d693fa7f1f2dfbb8b34ce22d9d38
import torch
import torch.nn as nn
import torch.nn.functional as F


class MyLinear(nn.Module):
    """Linear layer with equalized learning rate and custom learning rate multiplier."""

    def __init__(self, input_size, output_size, gain=2 ** 0.5, use_wscale=False, lrmul=1, bias=True):
        super().__init__()
        he_std = gain * input_size ** -0.5
        if use_wscale:
            init_std = 1.0 / lrmul
            self.w_mul = he_std * lrmul
        else:
            init_std = he_std / lrmul
            self.w_mul = lrmul
        self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std)
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(output_size))
            self.b_mul = lrmul
        else:
            self.bias = None

    def forward(self, x):
        bias = self.bias
        if bias is not None:
            bias = bias * self.b_mul
        return F.linear(x, self.weight * self.w_mul, bias)


class Model(nn.Module):

    def __init__(self, latent_size, channels, use_wscale):
        super().__init__()
        self.lin = MyLinear(latent_size, channels * 2, gain=1.0, use_wscale=use_wscale)

    def forward(self, x, latent):
        style = self.lin(latent)
        shape = [-1, 2, x.size(1)] + (x.dim() - 2) * [1]
        style = style.view(shape)
        x = x * (style[:, 0] + 1.0) + style[:, 1]
        return x


def get_inputs():
    return [torch.rand([64, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4, 4, 1.0]
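A shape walk-through of my own (not from the source repo): the latent is projected to 2 * channels style values, viewed as (-1, 2, channels, 1, 1), and split into a per-channel scale and bias that broadcast over the feature map.

import torch

m = Model(latent_size=4, channels=4, use_wscale=1.0)
x, latent = torch.rand(64, 4, 4, 4), torch.rand(4, 4, 4, 4)
y = m(x, latent)
print(y.shape)  # torch.Size([64, 4, 4, 4]); style reshaped to (64, 2, 4, 1, 1) before the split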
CReLU
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/on/condzuwaooyuptwsw6dxafaur66nfgbtr43qtbz6jnc7qpkefima.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
#   cat => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %relu_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = (xindex // 16) % 8
    x0 = xindex % 16
    x2 = (xindex // 128)
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
    tmp6 = tl.full([1], 0, tl.int32)
    tmp7 = triton_helpers.maximum(tmp6, tmp5)
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tmp11 = tl.full([1], 8, tl.int64)
    tmp12 = tmp0 < tmp11
    tmp13 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp10 & xmask, other=0.0)
    tmp14 = -tmp13
    tmp15 = triton_helpers.maximum(tmp6, tmp14)
    tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
    tmp17 = tl.where(tmp10, tmp15, tmp16)
    tmp18 = tl.where(tmp4, tmp9, tmp17)
    tl.store(out_ptr0 + (x3), tmp18, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
        stream0 = get_raw_stream(0)
        triton_poi_fused_cat_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class CReLU(nn.Module):

    def __init__(self):
        super(CReLU, self).__init__()
        self.relu = nn.ReLU()

    def forward(self, x):
        return torch.cat((self.relu(x), self.relu(-x)), 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 8
    x0 = xindex % 16
    x2 = xindex // 128
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tl.full([1], 0, tl.int32)
    tmp7 = triton_helpers.maximum(tmp6, tmp5)
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp13 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 & xmask, other=0.0)
    tmp14 = -tmp13
    tmp15 = triton_helpers.maximum(tmp6, tmp14)
    tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
    tmp17 = tl.where(tmp10, tmp15, tmp16)
    tmp18 = tl.where(tmp4, tmp9, tmp17)
    tl.store(out_ptr0 + x3, tmp18, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class CReLUNew(nn.Module):

    def __init__(self):
        super(CReLUNew, self).__init__()
        self.relu = nn.ReLU()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
And1210/FER_SSL
CReLU
false
1935
[ "MIT" ]
0
6cad839261667dce30a8b9db9638ef7334953063
https://github.com/And1210/FER_SSL/tree/6cad839261667dce30a8b9db9638ef7334953063
import torch
import torch.nn as nn


class Model(nn.Module):

    def __init__(self):
        super().__init__()
        self.relu = nn.ReLU()

    def forward(self, x):
        return torch.cat((self.relu(x), self.relu(-x)), 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
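A small worked example of my own (not from the source repo): CReLU doubles the channel dimension, placing the positive part of each activation in the first half and the negative part (sign-flipped) in the second half.

import torch

m = Model()
x = torch.tensor([[[[1.0]], [[-2.0]]]])  # shape (1, 2, 1, 1)
print(m(x).flatten().tolist())  # [1.0, 0.0, 0.0, 2.0]: relu(x) then relu(-x), concatenated on dim 1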
AUGRUCell
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/su/csuyns6mfp66oncomzo5eikxszcgvnk2yq5hnh5qodgq2yxjppwg.py
# Topologically Sorted Source Nodes: [add, reset_gate, add_1, update_gate, mul, add_2, new_state, update_gate_1, sub, mul_2, mul_3, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   add_2 => add_2
#   hy => add_3
#   mul => mul
#   mul_2 => mul_2
#   mul_3 => mul_3
#   new_state => tanh
#   reset_gate => sigmoid
#   sub => sub
#   update_gate => sigmoid_1
#   update_gate_1 => mul_1
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, %getitem_3), kwargs = {})
#   %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_1, %getitem_4), kwargs = {})
#   %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %getitem_5), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, %mul), kwargs = {})
#   %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
#   %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %sigmoid_1), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_1), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_6), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %tanh), kwargs = {})
#   %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask)
    tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (4 + x0 + (12*x1)), xmask)
    tmp6 = tl.load(in_ptr0 + (x0 + (12*x1)), xmask)
    tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (x0 + (12*x1)), xmask)
    tmp12 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask)
    tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + (8 + x0 + (12*x1)), xmask)
    tmp19 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr4 + (x2), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.sigmoid(tmp4)
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp11 = tl.sigmoid(tmp10)
    tmp14 = tmp12 + tmp13
    tmp16 = tmp11 * tmp15
    tmp17 = tmp14 + tmp16
    tmp18 = libdevice.tanh(tmp17)
    tmp20 = tmp19 * tmp5
    tmp21 = 1.0
    tmp22 = tmp21 - tmp20
    tmp24 = tmp22 * tmp23
    tmp25 = tmp20 * tmp18
    tmp26 = tmp24 + tmp25
    tl.store(out_ptr0 + (x2), tmp5, xmask)
    tl.store(out_ptr1 + (x2), tmp11, xmask)
    tl.store(out_ptr2 + (x2), tmp18, xmask)
    tl.store(out_ptr3 + (x2), tmp26, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
    args.clear()
    assert_size_stride(primals_1, (12, 4), (4, 1))
    assert_size_stride(primals_2, (12, ), (1, ))
    assert_size_stride(primals_3, (64, 4), (4, 1))
    assert_size_stride(primals_4, (12, 4), (4, 1))
    assert_size_stride(primals_5, (12, ), (1, ))
    assert_size_stride(primals_6, (64, 4), (4, 1))
    assert_size_stride(primals_7, (16, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
        # Topologically Sorted Source Nodes: [gh], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add, reset_gate, add_1, update_gate, mul, add_2, new_state, update_gate_1, sub, mul_2, mul_3, hy], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.tanh, aten.rsub]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mul_rsub_sigmoid_tanh_0.run(buf0, primals_2, buf1, primals_7, primals_6, buf3, buf2, buf4, buf5, 256, grid=grid(256), stream=stream0)
        del buf0
        del primals_2
    return (buf5, primals_3, primals_6, primals_7, reinterpret_tensor(buf1, (64, 4), (12, 1), 8), buf2, buf3, buf4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class AUGRUCell(nn.Module):
    r"""Effect of GRU with attentional update gate (AUGRU). AUGRU combines
    the attention mechanism and GRU seamlessly.

    Formally:

    .. math::
        \tilde{u}_t' = a_t \cdot u_t'

        h_t' = (1 - \tilde{u}_t') \circ h_{t-1}' + \tilde{u}_t' \circ \tilde{h}_t'
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(AUGRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size))
        if bias:
            self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
            self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)

    def forward(self, input, hidden_output, att_score):
        gi = F.linear(input, self.weight_ih, self.bias_ih)
        gh = F.linear(hidden_output, self.weight_hh, self.bias_hh)
        i_r, i_u, i_h = gi.chunk(3, 1)
        h_r, h_u, h_h = gh.chunk(3, 1)
        reset_gate = torch.sigmoid(i_r + h_r)
        update_gate = torch.sigmoid(i_u + h_u)
        new_state = torch.tanh(i_h + reset_gate * h_h)
        att_score = att_score.view(-1, 1)
        update_gate = att_score * update_gate
        hy = (1 - update_gate) * hidden_output + update_gate * new_state
        return hy


def get_inputs():
    return [torch.rand([64, 4]), torch.rand([64, 4]), torch.rand([16, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (4 + x0 + 12 * x1), xmask)
    tmp6 = tl.load(in_ptr0 + (x0 + 12 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + (x0 + 12 * x1), xmask)
    tmp12 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask)
    tmp13 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + (8 + x0 + 12 * x1), xmask)
    tmp19 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr4 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = tl.sigmoid(tmp4)
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp11 = tl.sigmoid(tmp10)
    tmp14 = tmp12 + tmp13
    tmp16 = tmp11 * tmp15
    tmp17 = tmp14 + tmp16
    tmp18 = libdevice.tanh(tmp17)
    tmp20 = tmp19 * tmp5
    tmp21 = 1.0
    tmp22 = tmp21 - tmp20
    tmp24 = tmp22 * tmp23
    tmp25 = tmp20 * tmp18
    tmp26 = tmp24 + tmp25
    tl.store(out_ptr0 + x2, tmp5, xmask)
    tl.store(out_ptr1 + x2, tmp11, xmask)
    tl.store(out_ptr2 + x2, tmp18, xmask)
    tl.store(out_ptr3 + x2, tmp26, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (12, 4), (4, 1))
    assert_size_stride(primals_2, (12,), (1,))
    assert_size_stride(primals_3, (64, 4), (4, 1))
    assert_size_stride(primals_4, (12, 4), (4, 1))
    assert_size_stride(primals_5, (12,), (1,))
    assert_size_stride(primals_6, (64, 4), (4, 1))
    assert_size_stride(primals_7, (16, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
        extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((64, 12), (12, 1), torch.float32)
        extern_kernels.addmm(primals_5, primals_6, reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf0, primals_2, buf1, primals_7, primals_6, buf3, buf2, buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del primals_2
    return buf5, primals_3, primals_6, primals_7, reinterpret_tensor(buf1, (64, 4), (12, 1), 8), buf2, buf3, buf4


class AUGRUCellNew(nn.Module):
    r"""Effect of GRU with attentional update gate (AUGRU). AUGRU combines
    the attention mechanism and GRU seamlessly.

    Formally:

    .. math::
        \tilde{u}_t' = a_t \cdot u_t'

        h_t' = (1 - \tilde{u}_t') \circ h_{t-1}' + \tilde{u}_t' \circ \tilde{h}_t'
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(AUGRUCellNew, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size))
        if bias:
            self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
            self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)

    def forward(self, input_0, input_1, input_2):
        primals_1 = self.weight_ih
        primals_4 = self.weight_hh
        primals_2 = self.bias_ih
        primals_5 = self.bias_hh
        primals_3 = input_0
        primals_6 = input_1
        primals_7 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
        return output[0]
Ahren09/RecBole
AUGRUCell
false
1936
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):
    r"""Effect of GRU with attentional update gate (AUGRU). AUGRU combines
    the attention mechanism and GRU seamlessly.

    Formally:

    .. math::
        \tilde{u}_t' = a_t \cdot u_t'

        h_t' = (1 - \tilde{u}_t') \circ h_{t-1}' + \tilde{u}_t' \circ \tilde{h}_t'
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.weight_ih = nn.Parameter(torch.randn(3 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(torch.randn(3 * hidden_size, hidden_size))
        if bias:
            self.bias_ih = nn.Parameter(torch.zeros(3 * hidden_size))
            self.bias_hh = nn.Parameter(torch.zeros(3 * hidden_size))
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)

    def forward(self, input, hidden_output, att_score):
        gi = F.linear(input, self.weight_ih, self.bias_ih)
        gh = F.linear(hidden_output, self.weight_hh, self.bias_hh)
        i_r, i_u, i_h = gi.chunk(3, 1)
        h_r, h_u, h_h = gh.chunk(3, 1)
        reset_gate = torch.sigmoid(i_r + h_r)
        update_gate = torch.sigmoid(i_u + h_u)
        new_state = torch.tanh(i_h + reset_gate * h_h)
        att_score = att_score.view(-1, 1)
        update_gate = att_score * update_gate
        hy = (1 - update_gate) * hidden_output + update_gate * new_state
        return hy


def get_inputs():
    return [torch.rand([64, 4]), torch.rand([64, 4]), torch.rand([16, 4])]


def get_init_inputs():
    return [4, 4]
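A smoke test of my own (not from the source repo) for the gating behaviour above: with an attention score of zero the scaled update gate vanishes, so the cell returns the previous hidden state unchanged.

import torch

cell = Model(input_size=4, hidden_size=4)
x, h = torch.rand(64, 4), torch.rand(64, 4)
att = torch.zeros(16, 4)  # viewed as (-1, 1) == (64, 1) inside forward
hy = cell(x, h, att)
assert torch.allclose(hy, h)  # update_gate is zeroed, so hy == hidden_output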
GlobalAvgPool2d
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py
# Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
#   avg_pool2d => avg_pool2d
# Graph fragment:
#   %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp8 = tmp7 + tmp6
    tmp10 = tmp9 + tmp8
    tmp12 = tmp11 + tmp10
    tmp14 = tmp13 + tmp12
    tmp16 = tmp15 + tmp14
    tmp18 = tmp17 + tmp16
    tmp20 = tmp19 + tmp18
    tmp22 = tmp21 + tmp20
    tmp24 = tmp23 + tmp22
    tmp26 = tmp25 + tmp24
    tmp28 = tmp27 + tmp26
    tmp30 = tmp29 + tmp28
    tmp31 = 0.0625
    tmp32 = tmp30 * tmp31
    tl.store(out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
        stream0 = get_raw_stream(0)
        triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
        del arg0_1
    return (reinterpret_tensor(buf0, (4, 4), (4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class GlobalAvgPool2d(nn.Module): """Performs global average pooling over the entire height and width of a batched 2D tensor # Arguments input: Input tensor """ def forward(self, input): return nn.functional.avg_pool2d(input, kernel_size=input.size()[2:] ).view(-1, input.size(1)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
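Why this lowers to a single scaled sum: averaging a full 4x4 spatial plane is just the sum of 16 loads multiplied by 1/16 = 0.0625, which is exactly what the generated kernel does per (batch, channel) pair. A minimal sanity sketch (illustrative only; plain eager PyTorch on CPU, no Triton needed) checking that global average pooling matches a mean over the spatial dims:

import torch
from torch import nn

x = torch.rand(4, 4, 4, 4)
# kernel_size equal to the full spatial extent -> one value per (n, c) pair
pooled = nn.functional.avg_pool2d(x, kernel_size=x.size()[2:]).view(-1, x.size(1))
assert torch.allclose(pooled, x.mean(dim=(2, 3)))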
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4), (4, 1), 0), class GlobalAvgPool2dNew(nn.Module): """Performs global average pooling over the entire height and width of a batched 2D tensor # Arguments input: Input tensor """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
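A hedged smoke test for the compiled wrapper above, assuming a CUDA device, a working Triton install, and both classes in scope; note that call() only clears its Python argument list, so the input tensor itself is untouched:

import torch

eager = GlobalAvgPool2d()
fused = GlobalAvgPool2dNew()
x = torch.rand(4, 4, 4, 4, device='cuda')  # contiguous, matching the asserted (64, 16, 4, 1) strides
torch.testing.assert_close(eager(x), fused(x))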
AndreasLTeigen/few_shot_open_world
GlobalAvgPool2d
false
1937
[ "MIT" ]
0
3514824c4233fdff9af9c0b636435b2ff0fa6e09
https://github.com/AndreasLTeigen/few_shot_open_world/tree/3514824c4233fdff9af9c0b636435b2ff0fa6e09
import torch from torch import nn class Model(nn.Module): """Performs global average pooling over the entire height and width of a batched 2D tensor # Arguments input: Input tensor """ def forward(self, input): return nn.functional.avg_pool2d(input, kernel_size=input.size()[2:] ).view(-1, input.size(1)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
Upsample
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/oj/cojl5mb3pzv5jbmfzjkbac5hekbmpvb72kof6ouyyasitrogdd6n.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 
tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/mt/cmt4roffhwfg6vw2odjfrgu4bjav3cztqx74kxjfq5igljucibfl.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_1 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 
8, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 1024, grid=grid(1024), stream=stream0) del primals_3 return (buf2, primals_2, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class Upsample(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode='nearest' ) if self.with_conv: x = self.conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'with_conv': 4}]
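The nearest-neighbor indexing in the generated kernel (tmp4 = int(x1 * 0.5), tmp8 = int(x0 * 0.5)) is the usual floor mapping for a fixed 2x scale: output pixel (i, j) copies input pixel (i // 2, j // 2). A small eager illustration of that mapping (CPU only, no Triton assumed):

import torch

x = torch.arange(16.0).view(1, 1, 4, 4)
y = torch.nn.functional.interpolate(x, scale_factor=2.0, mode='nearest')
# spot-check one output pixel against the floor-divided input index
i, j = 5, 3
assert y[0, 0, i, j] == x[0, 0, i // 2, j // 2]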
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(1024)](buf2, primals_3, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class UpsampleNew(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
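An illustrative parity check for the pair above, assuming a CUDA device and Triton; syncing state_dicts makes the eager module and the compiled wrapper run with the same conv weights:

import torch

ref = Upsample(4, with_conv=True).cuda()
opt = UpsampleNew(4, with_conv=True).cuda()
opt.load_state_dict(ref.state_dict())  # share the conv weight and bias
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(ref(x), opt(x))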
AkioHayakawa/SDEdit
Upsample
false
1938
[ "MIT" ]
0
54d793bc013ea99ae81c539bc559254fa8746e19
https://github.com/AkioHayakawa/SDEdit/tree/54d793bc013ea99ae81c539bc559254fa8746e19
import torch import torch.nn as nn class Model(nn.Module): def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, x): x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode='nearest' ) if self.with_conv: x = self.conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4, 4]
Model
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ra/craxuzdmkcujw3orw5ikfgvb3ib3hespmbj7twbz6ahrvb64c6k5.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_2 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = 
xindex % 25 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/fy/cfygtidgvtwf43xsn75bsupb4zvnusaxwz77mmklmgyfj3gj3wot.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_5 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 75 x2 = (xindex // 1200) x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + (1216*x2)), tmp4, xmask) tl.store(out_ptr1 + (x3 + (1280*x2)), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/wm/cwm3bll7ka2xnresr66yjk73lkjwnsqlfaitwjxeapp4hoflayhn.py # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.relu, aten.view] # Source node to ATen node mapping: # x_5 => relu_1 # x_6 => view_4 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %view_4 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%relu_1, [64, 75]), 
kwargs = {}) triton_poi_fused_relu_view_2 = async_compile.triton('triton_poi_fused_relu_view_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_view_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 75 x1 = (xindex // 75) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (75*(x1 % 16)) + (1216*(x1 // 16))), xmask) tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/dz/cdzzjqufxgjdtwmtqoqggqn2ny2ysfyvvnngvb35noosm27wiln3.py # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_8 => relu_2 # Graph fragment: # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/o7/co7cl37yu5xdwxammabqbyexb45r6cbckqzsws3e4k6r5lhudgyy.py # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_11 => relu_3 # Graph fragment: # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), 
tmp4, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (25, 4), (4, 1)) assert_size_stride(primals_2, (25, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (75, 25), (25, 1)) assert_size_stride(primals_5, (75, ), (1, )) assert_size_stride(primals_6, (200, 75), (75, 1)) assert_size_stride(primals_7, (200, ), (1, )) assert_size_stride(primals_8, (4, 200), (200, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 25), (25, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 25), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 25), (400, 100, 25, 1), 0); del buf0 # reuse buf12 = empty_strided_cuda((4, 4, 4, 25), (400, 100, 25, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf12, 1600, grid=grid(1600), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 75), (75, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 25), (25, 1), 0), reinterpret_tensor(primals_4, (25, 75), (1, 25), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 75), (1216, 300, 75, 1), torch.float32) buf11 = empty_strided_cuda((4, 4, 4, 75), (1280, 300, 75, 1), torch.bool) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_5, buf3, buf11, 4800, grid=grid(4800), stream=stream0) del primals_5 buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.relu, aten.view] triton_poi_fused_relu_view_2.run(buf3, buf4, 4800, grid=grid(4800), stream=stream0) del buf3 buf5 = empty_strided_cuda((64, 200), (200, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (75, 200), (1, 75), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 200), (3200, 800, 200, 1), 0); del buf5 # reuse buf10 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_3.run(buf6, primals_7, buf10, 12800, grid=grid(12800), stream=stream0) del primals_7 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf6, (64, 200), (200, 1), 0), reinterpret_tensor(primals_8, (200, 4), (1, 200), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf7 # reuse buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_4.run(buf8, primals_9, buf9, 256, grid=grid(256), stream=stream0) del primals_9 return (buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), 
reinterpret_tensor(buf1, (64, 25), (25, 1), 0), buf4, reinterpret_tensor(buf6, (64, 200), (200, 1), 0), buf9, primals_8, buf10, primals_6, buf11, primals_4, buf12, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((25, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((25, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((75, 25), (25, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((75, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((200, 75), (75, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 200), (200, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from torch.nn import Module import torch from torch.nn import Linear import torch.nn as nn import torch.nn.functional as F class Model(Module): def __init__(self, input_shape, nb_classes, *args, **kwargs): super(Model, self).__init__() self.fc1 = Linear(input_shape[0], 25) self.dropout1 = nn.Dropout(0.25) self.fc2 = Linear(25, 75) self.dropout2 = nn.Dropout(0.3) self.fc3 = Linear(75, 200) self.dropout3 = nn.Dropout(0.3) self.fc4 = Linear(200, nb_classes) self.dropout4 = nn.Dropout(0.3) def forward(self, x): x = self.fc1(x) x = self.dropout1(x) x = F.relu(x) x = self.fc2(x) x = self.dropout2(x) x = F.relu(x) x = self.fc3(x) x = self.dropout3(x) x = F.relu(x) x = self.fc4(x) x = self.dropout4(x) x = F.relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_shape': [4, 4], 'nb_classes': 4}]
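None of the four Dropout layers appear in the compiled graph above: nn.Dropout is the identity outside training, so a trace presumably taken with the module in eval mode folds it away, leaving only the addmm/relu chain. A quick eager illustration of that behavior (CPU, no Triton assumed):

import torch

m = Model([4, 4], 4)
x = torch.rand(4, 4, 4, 4)
m.train()
print(torch.equal(m(x), m(x)))  # almost surely False: dropout resamples its mask per call
m.eval()
assert torch.equal(m(x), m(x))  # True: dropout is the identity in eval mode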
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.nn import Module from torch.nn import Linear import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 25 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 75 x2 = xindex // 1200 x3 = xindex % 1200 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x3 + 1216 * x2), tmp4, xmask) tl.store(out_ptr1 + (x3 + 1280 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_relu_view_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 4800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 75 x1 = xindex // 75 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 75 * (x1 % 16) + 1216 * (x1 // 16)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 200 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (25, 4), (4, 1)) assert_size_stride(primals_2, (25,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (75, 25), (25, 1)) assert_size_stride(primals_5, (75,), (1,)) assert_size_stride(primals_6, (200, 75), (75, 1)) assert_size_stride(primals_7, (200,), (1,)) assert_size_stride(primals_8, (4, 200), (200, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 25), (25, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 25), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 25), (400, 100, 25, 1), 0) del buf0 buf12 = empty_strided_cuda((4, 4, 4, 25), (400, 100, 25, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1600)](buf1, primals_2, buf12, 1600, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 75), (75, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 25), (25, 1), 0), reinterpret_tensor(primals_4, (25, 75), (1, 25), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 75), (1216, 300, 75, 1), torch. 
float32) buf11 = empty_strided_cuda((4, 4, 4, 75), (1280, 300, 75, 1), torch .bool) triton_poi_fused_relu_threshold_backward_1[grid(4800)](buf2, primals_5, buf3, buf11, 4800, XBLOCK=256, num_warps=4, num_stages=1 ) del primals_5 buf4 = buf2 del buf2 triton_poi_fused_relu_view_2[grid(4800)](buf3, buf4, 4800, XBLOCK= 128, num_warps=4, num_stages=1) del buf3 buf5 = empty_strided_cuda((64, 200), (200, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_6, (75, 200), (1, 75), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 200), (3200, 800, 200, 1), 0) del buf5 buf10 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool) triton_poi_fused_relu_threshold_backward_3[grid(12800)](buf6, primals_7, buf10, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf6, (64, 200), (200, 1), 0), reinterpret_tensor(primals_8, (200, 4), (1, 200), 0), out=buf7) buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf7 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_4[grid(256)](buf8, primals_9, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 25), (25, 1), 0 ), buf4, reinterpret_tensor(buf6, (64, 200), (200, 1), 0 ), buf9, primals_8, buf10, primals_6, buf11, primals_4, buf12 class ModelNew(Module): def __init__(self, input_shape, nb_classes, *args, **kwargs): super(ModelNew, self).__init__() self.fc1 = Linear(input_shape[0], 25) self.dropout1 = nn.Dropout(0.25) self.fc2 = Linear(25, 75) self.dropout2 = nn.Dropout(0.3) self.fc3 = Linear(75, 200) self.dropout3 = nn.Dropout(0.3) self.fc4 = Linear(200, nb_classes) self.dropout4 = nn.Dropout(0.3) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
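A hypothetical end-to-end check of the compiled module against the eager one, assuming a CUDA device and Triton; both sides must be in eval mode so dropout does not fire on the eager path:

import torch

ref = Model([4, 4], 4).cuda().eval()
opt = ModelNew([4, 4], 4).cuda().eval()
opt.load_state_dict(ref.state_dict())  # identical fc1..fc4 parameters
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(ref(x), opt(x))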
AishaAlaagib/machine-unlearning
Model
false
1939
[ "MIT" ]
0
28dd55792bacb1ffccda788b4f4dcce09e113b37
https://github.com/AishaAlaagib/machine-unlearning/tree/28dd55792bacb1ffccda788b4f4dcce09e113b37
from torch.nn import Module import torch from torch.nn import Linear import torch.nn as nn import torch.nn.functional as F class Model(Module): def __init__(self, input_shape, nb_classes, *args, **kwargs): super(Model, self).__init__() self.fc1 = Linear(input_shape[0], 25) self.dropout1 = nn.Dropout(0.25) self.fc2 = Linear(25, 75) self.dropout2 = nn.Dropout(0.3) self.fc3 = Linear(75, 200) self.dropout3 = nn.Dropout(0.3) self.fc4 = Linear(200, nb_classes) self.dropout4 = nn.Dropout(0.3) def forward(self, x): x = self.fc1(x) x = self.dropout1(x) x = F.relu(x) x = self.fc2(x) x = self.dropout2(x) x = F.relu(x) x = self.fc3(x) x = self.dropout3(x) x = F.relu(x) x = self.fc4(x) x = self.dropout4(x) x = F.relu(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
VAE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/3q/c3qwr2d2rrpjzvnddomnmdy6cwva4hjlvrn2y5epemk4ak3k2m6c.py # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu] # Source node to ATen node mapping: # h1 => relu # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 400 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/jd/cjdtedosfkoutmd76tpyaejxhpwspa7takf5bagemxu5kt4jquxx.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 784 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (400, 784), (784, 1)) assert_size_stride(primals_3, (400, ), (1, )) assert_size_stride(primals_4, (20, 400), (400, 1)) assert_size_stride(primals_5, (20, ), (1, )) assert_size_stride(primals_6, (20, 400), (400, 1)) assert_size_stride(primals_7, (20, ), (1, )) assert_size_stride(primals_8, (400, 20), (20, 1)) assert_size_stride(primals_9, (400, ), (1, )) assert_size_stride(primals_10, (784, 400), (400, 1)) assert_size_stride(primals_11, (784, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] 
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 400), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 1600, grid=grid(1600), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32) # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32) # Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 400), (400, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf2, reinterpret_tensor(primals_8, (20, 400), (1, 20), 0), out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [h3], Original ATen: [aten.relu] triton_poi_fused_relu_0.run(buf5, primals_9, 1600, grid=grid(1600), stream=stream0) del primals_9 buf6 = empty_strided_cuda((4, 784), (784, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf5, reinterpret_tensor(primals_10, (400, 784), (1, 400), 0), out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf7, primals_11, 3136, grid=grid(3136), stream=stream0) del primals_11 return (buf7, buf2, buf3, primals_1, buf1, buf2, buf5, buf7, primals_10, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((400, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((20, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((20, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((400, 20), (20, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((784, 400), (400, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((784, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data from torch import nn class VAE(nn.Module): def __init__(self): super(VAE, self).__init__() self.fc1 = nn.Linear(784, 400) self.fc21 = nn.Linear(400, 20) self.fc22 = nn.Linear(400, 20) self.fc3 = nn.Linear(20, 400) self.fc4 = nn.Linear(400, 784) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def encode(self, x): h1 = self.relu(self.fc1(x)) return self.fc21(h1), self.fc22(h1) def reparameterize(self, mu, logvar): if self.training: std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) else: return mu def decode(self, z): h3 = self.relu(self.fc3(z)) return self.sigmoid(self.fc4(h3)) def forward(self, x): mu, logvar = self.encode(x.view(-1, 784)) z = self.reparameterize(mu, logvar) return self.decode(z), mu, logvar def get_inputs(): return [torch.rand([4, 784])] def get_init_inputs(): return [[], {}]
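reparameterize() implements the standard trick z = mu + sigma * eps with sigma = exp(logvar / 2) and eps ~ N(0, I), which keeps z differentiable with respect to mu and logvar while still being a proper Gaussian sample. A small numeric sketch (illustrative values only):

import torch

mu = torch.zeros(100000, 20)
logvar = torch.full((100000, 20), 0.5)
std = torch.exp(0.5 * logvar)            # sigma = exp(logvar / 2)
eps = torch.randn_like(std)
z = eps.mul(std).add_(mu)                # z = mu + sigma * eps
print(z.mean().item(), z.std().item())   # ~0.0 and ~exp(0.25) ≈ 1.284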
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1600
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 400
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 3136
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 784
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 784), (784, 1))
    assert_size_stride(primals_2, (400, 784), (784, 1))
    assert_size_stride(primals_3, (400,), (1,))
    assert_size_stride(primals_4, (20, 400), (400, 1))
    assert_size_stride(primals_5, (20,), (1,))
    assert_size_stride(primals_6, (20, 400), (400, 1))
    assert_size_stride(primals_7, (20,), (1,))
    assert_size_stride(primals_8, (400, 20), (20, 1))
    assert_size_stride(primals_9, (400,), (1,))
    assert_size_stride(primals_10, (784, 400), (400, 1))
    assert_size_stride(primals_11, (784,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
            400), (1, 784), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(1600)](buf1, primals_3, 1600, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
        extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
            (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
        extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6,
            (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_8, (20, 400), (
            1, 20), 0), out=buf4)
        buf5 = buf4
        del buf4
        triton_poi_fused_relu_0[grid(1600)](buf5, primals_9, 1600, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_9
        buf6 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
        extern_kernels.mm(buf5, reinterpret_tensor(primals_10, (400, 784),
            (1, 400), 0), out=buf6)
        buf7 = buf6
        del buf6
        triton_poi_fused_sigmoid_1[grid(3136)](buf7, primals_11, 3136,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
    return (buf7, buf2, buf3, primals_1, buf1, buf2, buf5, buf7,
        primals_10, primals_8, primals_6, primals_4)


class VAENew(nn.Module):

    def __init__(self):
        super(VAENew, self).__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)
        self.fc22 = nn.Linear(400, 20)
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def encode(self, x):
        h1 = self.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)

    def reparameterize(self, mu, logvar):
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu

    def decode(self, z):
        h3 = self.relu(self.fc3(z))
        return self.sigmoid(self.fc4(h3))

    def forward(self, input_0):
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc21.weight
        primals_5 = self.fc21.bias
        primals_6 = self.fc22.weight
        primals_7 = self.fc22.bias
        primals_8 = self.fc3.weight
        primals_9 = self.fc3.bias
        primals_10 = self.fc4.weight
        primals_11 = self.fc4.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0], output[1], output[2]
AlexTaguchi/vae-example
VAE
false
1940
[ "MIT" ]
0
8c647f248cc6e017fc6c5e7bb17c4a552e50ee1d
https://github.com/AlexTaguchi/vae-example/tree/8c647f248cc6e017fc6c5e7bb17c4a552e50ee1d
import torch
import torch.utils.data
from torch import nn


class Model(nn.Module):

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)
        self.fc22 = nn.Linear(400, 20)
        self.fc3 = nn.Linear(20, 400)
        self.fc4 = nn.Linear(400, 784)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def encode(self, x):
        h1 = self.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)

    def reparameterize(self, mu, logvar):
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu

    def decode(self, z):
        h3 = self.relu(self.fc3(z))
        return self.sigmoid(self.fc4(h3))

    def forward(self, x):
        mu, logvar = self.encode(x.view(-1, 784))
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar


def get_inputs():
    return [torch.rand([4, 784])]


def get_init_inputs():
    return []
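The (decode(z), mu, logvar) triple returned by the VAE above is normally trained against the standard ELBO objective. The following is a minimal textbook sketch of that loss, assuming binary cross-entropy reconstruction over the 784 pixels; it is illustrative only and is not code taken from the AlexTaguchi/vae-example repo.

import torch
import torch.nn.functional as F


def vae_loss(recon_x, x, mu, logvar):
    # Reconstruction term: pixel-wise binary cross-entropy, summed.
    bce = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    # Closed-form KL divergence between N(mu, sigma^2) and N(0, I).
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return bce + kld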
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/w3/cw3egt7ajdde7mbqzrdxs4mdcaxj75b4l3brz5gbsf4yd73gbids.py # Topologically Sorted Source Nodes: [dout], Original ATen: [aten.relu] # Source node to ATen node mapping: # dout => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = 
tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/i5/ci5f4nyelvfg4yf2o65ompoikj7ejkd32vb6hqtyrgycc5eswrpx.py # Topologically Sorted Source Nodes: [dout_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # dout_1 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/mb/cmbgucoluusz6nnhhljq67pcckhl4nj5x6sgk37o4foslzfprmnz.py # Topologically Sorted Source Nodes: [dout_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # dout_2 => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_2, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_2, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) 
triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 10 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (10*x0)), tmp11, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (512, 784), (784, 1)) assert_size_stride(primals_3, (512, ), (1, )) assert_size_stride(primals_4, (128, 512), (512, 1)) assert_size_stride(primals_5, (128, ), (1, )) assert_size_stride(primals_6, (10, 128), (128, 1)) assert_size_stride(primals_7, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 512), (1, 784), 0), out=buf0) del primals_2 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [dout], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, primals_3, 2048, grid=grid(2048), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 128), (1, 512), 0), 
out=buf2) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [dout_1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_5, 512, grid=grid(512), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [dout_2], Original ATen: [aten._softmax] triton_per_fused__softmax_2.run(buf4, buf7, 4, 10, grid=grid(4), stream=stream0) del buf4 return (buf7, primals_1, buf1, buf3, buf7, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((512, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((128, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.utils.data


class MLP(torch.nn.Module):

    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = torch.nn.Linear(784, 512)
        self.fc2 = torch.nn.Linear(512, 128)
        self.fc3 = torch.nn.Linear(128, 10)

    def forward(self, din):
        din = din.view(-1, 28 * 28)
        dout = F.relu(self.fc1(din))
        dout = F.relu(self.fc2(dout))
        dout = F.softmax(self.fc3(dout), dim=1)
        return dout


def get_inputs():
    return [torch.rand([4, 784])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
        tl.constexpr):
    xnumel = 4
    rnumel = 10
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
    tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
    tmp5 = tmp0 - tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(rmask & xmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp11 = tmp6 / tmp10
    tl.store(out_ptr2 + (r1 + 10 * x0), tmp11, rmask & xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 784), (784, 1))
    assert_size_stride(primals_2, (512, 784), (784, 1))
    assert_size_stride(primals_3, (512,), (1,))
    assert_size_stride(primals_4, (128, 512), (512, 1))
    assert_size_stride(primals_5, (128,), (1,))
    assert_size_stride(primals_6, (10, 128), (128, 1))
    assert_size_stride(primals_7, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
            512), (1, 784), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(2048)](buf1, primals_3, 2048, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 128),
            (1, 512), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(512)](buf3, primals_5, 512, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
            (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf4)
        del primals_7
        buf7 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        triton_per_fused__softmax_2[grid(4)](buf4, buf7, 4, 10, XBLOCK=1,
            num_warps=2, num_stages=1)
        del buf4
    return buf7, primals_1, buf1, buf3, buf7, primals_6, primals_4


class MLPNew(torch.nn.Module):

    def __init__(self):
        super(MLPNew, self).__init__()
        self.fc1 = torch.nn.Linear(784, 512)
        self.fc2 = torch.nn.Linear(512, 128)
        self.fc3 = torch.nn.Linear(128, 10)

    def forward(self, input_0):
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
AllenPu/DomainBed
MLP
false
1941
[ "MIT" ]
0
77519d71471e67f0356134abe0bf01a6dd2fdcfa
https://github.com/AllenPu/DomainBed/tree/77519d71471e67f0356134abe0bf01a6dd2fdcfa
import torch
import torch.nn.functional as F
import torch.utils.data


class Model(torch.nn.Module):

    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(784, 512)
        self.fc2 = torch.nn.Linear(512, 128)
        self.fc3 = torch.nn.Linear(128, 10)

    def forward(self, din):
        din = din.view(-1, 28 * 28)
        dout = F.relu(self.fc1(din))
        dout = F.relu(self.fc2(dout))
        dout = F.softmax(self.fc3(dout), dim=1)
        return dout


def get_inputs():
    return [torch.rand([4, 784])]


def get_init_inputs():
    return []
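Both fused ReLU kernels in the compiled MLP record above follow the same pattern: the matmul is dispatched to extern_kernels.mm without the bias, and the Triton kernel then adds the bias and applies ReLU in place on the same buffer. A plain-PyTorch reference for that fusion, written here as an illustrative sketch rather than code from the repo:

import torch


def fused_bias_relu_reference(x, weight, bias):
    out = x @ weight.t()       # the extern_kernels.mm step (bias omitted)
    out += bias                # tmp2 = tmp0 + tmp1 in the kernel body
    return out.clamp_min(0.0)  # triton_helpers.maximum(0, tmp2)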
CVAE
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/dq/cdqio62j6riqftfgbj3c6vjzgtz5eop6jf3nr6mys7n72ylbq2zi.py # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.cat] # Source node to ATen node mapping: # inputs => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 788 x1 = (xindex // 788) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 784, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((784*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 788, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-784) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu] # Source node to ATen node mapping: # h1 => relu # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_4), kwargs = {}) # %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/zd/czdeq2ohbgubcyeps2ukquvfhigxtyega57i24ketclusfgmyedi.py # Topologically Sorted Source Nodes: [inputs_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # inputs_1 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%addmm_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/6q/c6qzsmx3f6trdhdw64k4yax7gjvrvfglgxm6cs6u5pcvjitf26mr.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_12), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 784 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 784), (784, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 788), (788, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 8), (8, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (784, 4), (4, 1)) assert_size_stride(primals_12, (784, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 788), (788, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 3152, grid=grid(3152), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (788, 4), (1, 788), 0), out=buf1) del primals_3 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf2, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4) del primals_8 buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs_1], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf3, primals_2, buf5, 32, grid=grid(32), stream=stream0) del primals_2 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf5, reinterpret_tensor(primals_9, (8, 4), (1, 8), 0), out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [h3], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf7, primals_10, 16, grid=grid(16), stream=stream0) del primals_10 buf8 = empty_strided_cuda((4, 784), (784, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf7, reinterpret_tensor(primals_11, (4, 
784), (1, 4), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_3.run(buf9, primals_12, 3136, grid=grid(3136), stream=stream0) del primals_12 return (buf9, buf3, buf4, buf0, buf2, buf5, buf7, buf9, primals_11, primals_9, primals_7, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 788), (788, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((784, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((784, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
from torch import nn


class CVAE(nn.Module):

    def __init__(self, conditional_size, hidden_size, latent_size):
        super(CVAE, self).__init__()
        self.fc1 = nn.Linear(28 * 28 + conditional_size, hidden_size)
        self.fc21 = nn.Linear(hidden_size, latent_size)
        self.fc22 = nn.Linear(hidden_size, latent_size)
        self.fc3 = nn.Linear(latent_size + conditional_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, 28 * 28)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def encode(self, x, c):
        inputs = torch.cat([x, c], 1)
        h1 = self.relu(self.fc1(inputs))
        return self.fc21(h1), self.fc22(h1)

    def reparameterize(self, mu, logvar):
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu

    def decode(self, z, c):
        inputs = torch.cat([z, c], 1)
        h3 = self.relu(self.fc3(inputs))
        return self.sigmoid(self.fc4(h3))

    def forward(self, x, c):
        mu, logvar = self.encode(x.view(-1, 28 * 28), c)
        z = self.reparameterize(mu, logvar)
        return self.decode(z, c), mu, logvar


def get_inputs():
    return [torch.rand([4, 784]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'conditional_size': 4, 'hidden_size': 4, 'latent_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 3152
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 788
    x1 = xindex // 788
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 784, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (784 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 788, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-784 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 3136
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 784
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.sigmoid(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
        ) = args
    args.clear()
    assert_size_stride(primals_1, (4, 784), (784, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 788), (788, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 8), (8, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (784, 4), (4, 1))
    assert_size_stride(primals_12, (784,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 788), (788, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(3152)](primals_1, primals_2, buf0,
            3152, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (788, 4), (1,
            788), 0), out=buf1)
        del primals_3
        buf2 = buf1
        del buf1
        triton_poi_fused_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_6, buf2, reinterpret_tensor(primals_5,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_6
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf2, reinterpret_tensor(primals_7,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_8
        buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        triton_poi_fused_cat_2[grid(32)](buf3, primals_2, buf5, 32, XBLOCK
            =32, num_warps=1, num_stages=1)
        del primals_2
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf5, reinterpret_tensor(primals_9, (8, 4), (1,
            8), 0), out=buf6)
        buf7 = buf6
        del buf6
        triton_poi_fused_relu_1[grid(16)](buf7, primals_10, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_10
        buf8 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
        extern_kernels.mm(buf7, reinterpret_tensor(primals_11, (4, 784), (
            1, 4), 0), out=buf8)
        buf9 = buf8
        del buf8
        triton_poi_fused_sigmoid_3[grid(3136)](buf9, primals_12, 3136,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_12
    return (buf9, buf3, buf4, buf0, buf2, buf5, buf7, buf9, primals_11,
        primals_9, primals_7, primals_5)


class CVAENew(nn.Module):

    def __init__(self, conditional_size, hidden_size, latent_size):
        super(CVAENew, self).__init__()
        self.fc1 = nn.Linear(28 * 28 + conditional_size, hidden_size)
        self.fc21 = nn.Linear(hidden_size, latent_size)
        self.fc22 = nn.Linear(hidden_size, latent_size)
        self.fc3 = nn.Linear(latent_size + conditional_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, 28 * 28)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def encode(self, x, c):
        inputs = torch.cat([x, c], 1)
        h1 = self.relu(self.fc1(inputs))
        return self.fc21(h1), self.fc22(h1)

    def reparameterize(self, mu, logvar):
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu

    def decode(self, z, c):
        inputs = torch.cat([z, c], 1)
        h3 = self.relu(self.fc3(inputs))
        return self.sigmoid(self.fc4(h3))

    def forward(self, input_0, input_1):
        primals_3 = self.fc1.weight
        primals_4 = self.fc1.bias
        primals_2 = self.fc21.weight
        primals_6 = self.fc21.bias
        primals_5 = self.fc22.weight
        primals_8 = self.fc22.bias
        primals_9 = self.fc3.weight
        primals_10 = self.fc3.bias
        primals_11 = self.fc4.weight
        primals_12 = self.fc4.bias
        primals_1 = input_0
        primals_7 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12])
        return output[0], output[1], output[2]
AlexTaguchi/vae-example
CVAE
false
1942
[ "MIT" ]
0
8c647f248cc6e017fc6c5e7bb17c4a552e50ee1d
https://github.com/AlexTaguchi/vae-example/tree/8c647f248cc6e017fc6c5e7bb17c4a552e50ee1d
import torch
import torch.utils.data
from torch import nn


class Model(nn.Module):

    def __init__(self, conditional_size, hidden_size, latent_size):
        super().__init__()
        self.fc1 = nn.Linear(28 * 28 + conditional_size, hidden_size)
        self.fc21 = nn.Linear(hidden_size, latent_size)
        self.fc22 = nn.Linear(hidden_size, latent_size)
        self.fc3 = nn.Linear(latent_size + conditional_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, 28 * 28)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def encode(self, x, c):
        inputs = torch.cat([x, c], 1)
        h1 = self.relu(self.fc1(inputs))
        return self.fc21(h1), self.fc22(h1)

    def reparameterize(self, mu, logvar):
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu

    def decode(self, z, c):
        inputs = torch.cat([z, c], 1)
        h3 = self.relu(self.fc3(inputs))
        return self.sigmoid(self.fc4(h3))

    def forward(self, x, c):
        mu, logvar = self.encode(x.view(-1, 28 * 28), c)
        z = self.reparameterize(mu, logvar)
        return self.decode(z, c), mu, logvar


def get_inputs():
    return [torch.rand([4, 784]), torch.rand([4, 4])]


def get_init_inputs():
    return [4, 4, 4]
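The CVAE above concatenates the condition c with both the input and the latent code. A hedged usage sketch with one-hot class labels, using the Model class from the record above; the sizes here (10 classes, MNIST-like hidden and latent widths) are assumptions for illustration, whereas the dataset entry instantiates the model with conditional_size=4:

import torch
import torch.nn.functional as F

model = Model(conditional_size=10, hidden_size=400, latent_size=20)
x = torch.rand(8, 784)                                   # flattened images
c = F.one_hot(torch.randint(0, 10, (8,)), num_classes=10).float()  # labels
recon, mu, logvar = model(x, c)                          # condition enters encode and decode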
GlobalMaxPool1d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/l7/cl7te2znkphosjjgl5c32tkhoiehr4feuczsseen53wz7j4ifolb.py # Topologically Sorted Source Nodes: [max_pool1d], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # max_pool1d => _low_memory_max_pool2d_with_offsets # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 4], [1, 4], [0, 0], [1, 1], False), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 
tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [max_pool1d], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class GlobalMaxPool1d(nn.Module):
    """Performs global max pooling over the entire length of a batched 1D tensor

    # Arguments
        input: Input tensor
    """

    def forward(self, input):
        return nn.functional.max_pool1d(input, kernel_size=input.size()[2:]
            ).view(-1, input.size(1))


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 4), (4, 1), 0),


class GlobalMaxPool1dNew(nn.Module):
    """Performs global max pooling over the entire length of a batched 1D tensor

    # Arguments
        input: Input tensor
    """

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
AndreasLTeigen/few_shot_open_world
GlobalMaxPool1d
false
1943
[ "MIT" ]
0
3514824c4233fdff9af9c0b636435b2ff0fa6e09
https://github.com/AndreasLTeigen/few_shot_open_world/tree/3514824c4233fdff9af9c0b636435b2ff0fa6e09
import torch
from torch import nn


class Model(nn.Module):
    """Performs global max pooling over the entire length of a batched 1D tensor

    # Arguments
        input: Input tensor
    """

    def forward(self, input):
        return nn.functional.max_pool1d(input, kernel_size=input.size()[2:]
            ).view(-1, input.size(1))


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return []
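Because the kernel size equals the full temporal length, the pooled output is simply the per-channel maximum, which is what the unrolled Triton kernel above computes with three pairwise maximums. A quick equivalence sketch (an illustrative sanity check, not repo code):

import torch

x = torch.rand(4, 4, 4)
pooled = torch.nn.functional.max_pool1d(x, kernel_size=x.size(2)).view(-1, x.size(1))
# Global max pooling over dim 2 matches a plain amax reduction.
assert torch.equal(pooled, x.amax(dim=2))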
ConvNCFBPRLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/k4/ck4thowjujufjoitsqemh6ciznb7idjuufiayjqrkybsqiz2p7z7.py # Topologically Sorted Source Nodes: [distance, neg, exp, add, log, loss], Original ATen: [aten.sub, aten.neg, aten.exp, aten.add, aten.log, aten.sum] # Source node to ATen node mapping: # add => add # distance => sub # exp => exp # log => log # loss => sum_1 # neg => neg # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%log,), kwargs = {}) triton_per_fused_add_exp_log_neg_sub_sum_0 = async_compile.triton('triton_per_fused_add_exp_log_neg_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_log_neg_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_exp_log_neg_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = -tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = 1.0 tmp6 = tmp4 + tmp5 tmp7 = tl_math.log(tmp6) tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [distance, neg, exp, add, log, loss], Original ATen: [aten.sub, aten.neg, aten.exp, aten.add, aten.log, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_add_exp_log_neg_sub_sum_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class ConvNCFBPRLoss(nn.Module):
    """ConvNCFBPRLoss, based on Bayesian Personalized Ranking.

    Shape:
        - Pos_score: (N)
        - Neg_score: (N), same shape as Pos_score
        - Output: scalar.

    Examples::

        >>> loss = ConvNCFBPRLoss()
        >>> pos_score = torch.randn(3, requires_grad=True)
        >>> neg_score = torch.randn(3, requires_grad=True)
        >>> output = loss(pos_score, neg_score)
        >>> output.backward()
    """

    def __init__(self):
        super(ConvNCFBPRLoss, self).__init__()

    def forward(self, pos_score, neg_score):
        distance = pos_score - neg_score
        loss = torch.sum(torch.log(1 + torch.exp(-distance)))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
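Since log(1 + exp(x)) is softplus(x), the loss above is softplus(-distance) summed, so torch.nn.functional.softplus gives a numerically stable equivalent. A minimal sketch of the equivalence (illustrative aside, not part of the RecBole source):

import torch
import torch.nn.functional as F

pos_score = torch.randn(3)
neg_score = torch.randn(3)
distance = pos_score - neg_score
# Naive form, as in the module above; exp(-distance) can overflow for
# strongly negative distances.
loss_naive = torch.sum(torch.log(1 + torch.exp(-distance)))
# Stable form: softplus(x) = log(1 + exp(x)).
loss_stable = F.softplus(-distance).sum()
assert torch.allclose(loss_naive, loss_stable)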
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_exp_log_neg_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = -tmp2
    tmp4 = tl_math.exp(tmp3)
    tmp5 = 1.0
    tmp6 = tmp4 + tmp5
    tmp7 = tl_math.log(tmp6)
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_exp_log_neg_sub_sum_0[grid(1)](arg0_1, arg1_1,
            buf0, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class ConvNCFBPRLossNew(nn.Module):
    """ConvNCFBPRLoss, based on Bayesian Personalized Ranking.

    Shape:
        - Pos_score: (N)
        - Neg_score: (N), same shape as Pos_score
        - Output: scalar.

    Examples::

        >>> loss = ConvNCFBPRLoss()
        >>> pos_score = torch.randn(3, requires_grad=True)
        >>> neg_score = torch.randn(3, requires_grad=True)
        >>> output = loss(pos_score, neg_score)
        >>> output.backward()
    """

    def __init__(self):
        super(ConvNCFBPRLossNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
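A quick parity check between the eager and Triton-backed modules. Assumptions: a CUDA device is available, ConvNCFBPRLoss (from the reference code above) and ConvNCFBPRLossNew are both in scope, and inputs use the fixed (4, 4, 4, 4) contiguous layout that call() asserts:

import torch

if torch.cuda.is_available():
    pos = torch.rand(4, 4, 4, 4, device='cuda')
    neg = torch.rand(4, 4, 4, 4, device='cuda')
    eager = ConvNCFBPRLoss()
    fused = ConvNCFBPRLossNew()
    # Both reduce to the same scalar loss, up to float summation order.
    torch.testing.assert_close(eager(pos, neg), fused(pos, neg))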
Ahren09/RecBole
ConvNCFBPRLoss
false
1,944
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch
import torch.nn as nn


class Model(nn.Module):
    """ConvNCFBPRLoss, based on Bayesian Personalized Ranking.

    Shape:
        - Pos_score: (N)
        - Neg_score: (N), same shape as Pos_score
        - Output: scalar.

    Examples::

        >>> loss = ConvNCFBPRLoss()
        >>> pos_score = torch.randn(3, requires_grad=True)
        >>> neg_score = torch.randn(3, requires_grad=True)
        >>> output = loss(pos_score, neg_score)
        >>> output.backward()
    """

    def __init__(self):
        super().__init__()

    def forward(self, pos_score, neg_score):
        distance = pos_score - neg_score
        loss = torch.sum(torch.log(1 + torch.exp(-distance)))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {}) # %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/iz/ciztqj6kop3hxov46yrmzprkzfir3eljcic4mkqznz2j5cfeaudr.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float("-inf") tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = (tmp29 != 0) tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = (tmp33 != 0) tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = (tmp38 != 0) tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = (tmp43 != 0) tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) tl.store(out_ptr2 + (x2), tmp45, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/x5/cx5uvbfethxuwwkwxf3xaualzhlcwqsz4jxqpbhintggaypzjwqf.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, 
major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1) tmp2 = tl.load(in_out_ptr0 + (x4), xmask) tmp3 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # context_layer_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py # Topologically Sorted Source Nodes: [add_1, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add_1 => add_1 # hidden_states_2 => var_mean # Graph fragment: # %add_1 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_1, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/l3/cl3bnd5pv2p4ydfmlj74bv4mbiwr2ntrdvbubnjubetyhosmxag6.py # Topologically Sorted Source Nodes: [add_1, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # add_1 => add_1 # hidden_states_2 => add_2, add_3, mul, mul_1, rsqrt, sub_1 # Graph fragment: # %add_1 : [num_users=2] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1.0), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_11), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) 
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf5, primals_8, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(buf9, buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(buf2, primals_7, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse # Topologically 
Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_10 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add_1, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_5.run(buf13, primals_3, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_1, hidden_states_2], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(buf13, primals_3, buf14, buf15, primals_11, primals_12, buf16, 64, grid=grid(64), stream=stream0) del buf14 del buf15 del primals_12 return (buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import torch.nn as nn


class MultiHeadAttention(nn.Module):
    """Multi-head self-attention layers; an attention-score dropout layer is
    introduced.

    Args:
        input_tensor (torch.Tensor): the input of the multi-head self-attention layer
        attention_mask (torch.Tensor): the attention mask for the input tensor

    Returns:
        hidden_states (torch.Tensor): the output of the multi-head self-attention layer
    """

    def __init__(self, n_heads, hidden_size, hidden_dropout_prob,
            attn_dropout_prob, layer_norm_eps):
        super(MultiHeadAttention, self).__init__()
        if hidden_size % n_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                % (hidden_size, n_heads))
        self.num_attention_heads = n_heads
        self.attention_head_size = int(hidden_size / n_heads)
        self.all_head_size = (self.num_attention_heads *
            self.attention_head_size)
        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)
        self.attn_dropout = nn.Dropout(attn_dropout_prob)
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.LayerNorm = nn.LayerNorm(hidden_size, eps=layer_norm_eps)
        self.out_dropout = nn.Dropout(hidden_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads,
            self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, input_tensor, attention_mask):
        mixed_query_layer = self.query(input_tensor)
        mixed_key_layer = self.key(input_tensor)
        mixed_value_layer = self.value(input_tensor)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs = self.attn_dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        hidden_states = self.dense(context_layer)
        hidden_states = self.out_dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'n_heads': 4, 'hidden_size': 4, 'hidden_dropout_prob': 0.5,
        'attn_dropout_prob': 0.5, 'layer_norm_eps': 1}]
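Under the get_init_inputs configuration (n_heads=4, hidden_size=4, hence attention_head_size=1), transpose_for_scores reshapes (batch, seq_len, hidden) into (batch, n_heads, seq_len, head_size) so the batched matmuls run per head. A minimal shape walkthrough, illustrative only:

import torch

mha = MultiHeadAttention(n_heads=4, hidden_size=4, hidden_dropout_prob=0.5,
                         attn_dropout_prob=0.5, layer_norm_eps=1)
x = torch.rand(4, 4, 4)                     # (batch, seq_len, hidden)
q = mha.transpose_for_scores(mha.query(x))  # (batch, heads, seq_len, head_size)
print(q.shape)                              # torch.Size([4, 4, 4, 1])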
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float('-inf') tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = tmp29 != 0 tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = tmp33 != 0 tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = tmp38 != 0 tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = tmp43 != 0 tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) tl.store(out_ptr2 + x2, tmp45, xmask) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex // 4 x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_out_ptr0 + x4, xmask) tmp3 = 
tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + x4, tmp11, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') 
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), 
alpha=1, beta=1, out=buf13)
        del primals_10
        buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_native_layer_norm_5[grid(16)](buf13, primals_3,
            buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_6[grid(64)](buf13, primals_3,
            buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf14
        del buf15
        del primals_12
    return buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, (
        16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4,
        1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
        ), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9


class MultiHeadAttentionNew(nn.Module):
    """Multi-head self-attention layers; an attention-score dropout layer is
    introduced.

    Args:
        input_tensor (torch.Tensor): the input of the multi-head self-attention layer
        attention_mask (torch.Tensor): the attention mask for the input tensor

    Returns:
        hidden_states (torch.Tensor): the output of the multi-head self-attention layer
    """

    def __init__(self, n_heads, hidden_size, hidden_dropout_prob,
            attn_dropout_prob, layer_norm_eps):
        super(MultiHeadAttentionNew, self).__init__()
        if hidden_size % n_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                % (hidden_size, n_heads))
        self.num_attention_heads = n_heads
        self.attention_head_size = int(hidden_size / n_heads)
        self.all_head_size = (self.num_attention_heads *
            self.attention_head_size)
        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)
        self.attn_dropout = nn.Dropout(attn_dropout_prob)
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.LayerNorm = nn.LayerNorm(hidden_size, eps=layer_norm_eps)
        self.out_dropout = nn.Dropout(hidden_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads,
            self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, input_0, input_1):
        primals_1 = self.query.weight
        primals_2 = self.query.bias
        primals_4 = self.key.weight
        primals_5 = self.key.bias
        primals_6 = self.value.weight
        primals_7 = self.value.bias
        primals_9 = self.dense.weight
        primals_10 = self.dense.bias
        primals_11 = self.LayerNorm.weight
        primals_12 = self.LayerNorm.bias
        primals_3 = input_0
        primals_8 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12])
        return output[0]
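A parity sketch for the fused module, under stated assumptions: CUDA is available, both class definitions from this record are in scope together, both modules run in eval() so the two dropout layers are identities (the compiled graph above contains no dropout ops), and weights are copied so the comparison is meaningful:

import torch

if torch.cuda.is_available():
    kwargs = dict(n_heads=4, hidden_size=4, hidden_dropout_prob=0.5,
                  attn_dropout_prob=0.5, layer_norm_eps=1)
    eager = MultiHeadAttention(**kwargs).cuda().eval()
    fused = MultiHeadAttentionNew(**kwargs).cuda().eval()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, device='cuda')
    mask = torch.zeros(4, 4, 4, device='cuda')  # additive mask; 0.0 keeps a position
    torch.testing.assert_close(eager(x, mask), fused(x, mask),
                               rtol=1e-4, atol=1e-4)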
Ahren09/RecBole
MultiHeadAttention
false
1,945
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import math
import torch
import torch.nn as nn


class Model(nn.Module):
    """Multi-head self-attention layers; an attention-score dropout layer is
    introduced.

    Args:
        input_tensor (torch.Tensor): the input of the multi-head self-attention layer
        attention_mask (torch.Tensor): the attention mask for the input tensor

    Returns:
        hidden_states (torch.Tensor): the output of the multi-head self-attention layer
    """

    def __init__(self, n_heads, hidden_size, hidden_dropout_prob,
            attn_dropout_prob, layer_norm_eps):
        super().__init__()
        if hidden_size % n_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                % (hidden_size, n_heads))
        self.num_attention_heads = n_heads
        self.attention_head_size = int(hidden_size / n_heads)
        self.all_head_size = (self.num_attention_heads *
            self.attention_head_size)
        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)
        self.attn_dropout = nn.Dropout(attn_dropout_prob)
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.LayerNorm = nn.LayerNorm(hidden_size, eps=layer_norm_eps)
        self.out_dropout = nn.Dropout(hidden_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads,
            self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, input_tensor, attention_mask):
        mixed_query_layer = self.query(input_tensor)
        mixed_key_layer = self.key(input_tensor)
        mixed_value_layer = self.value(input_tensor)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs = self.attn_dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        hidden_states = self.dense(context_layer)
        hidden_states = self.out_dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'n_heads': 4, 'hidden_size': 4, 'hidden_dropout_prob': 0.5,
        'attn_dropout_prob': 0.5, 'layer_norm_eps': 1}]
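One behavioral detail of the fused softmax above (triton_poi_fused_1 / triton_poi_fused_2): a row whose masked scores are all -inf is written out as zeros rather than the NaNs a plain softmax produces. A hedged eager reproduction of that convention:

import torch

row = torch.full((4,), float('-inf'))
naive = torch.softmax(row, dim=-1)  # all NaN: -inf minus the row max (-inf) is undefined
safe = torch.where((row == float('-inf')).all(),
                   torch.zeros_like(row), naive)
print(naive, safe)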
LeNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/gn/cgn37hvkbudswcotxg2kd4nkddnptf52plxombym7nh7tjs5rbbc.py # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # x => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 784) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/jk/cjkfw7q2cdmjf2ab4jynamfj22kvje5c7y7o3cr7nyj7yej65x5e.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_1 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 12544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = (xindex // 14) x4 = xindex x3 = (xindex // 3136) x5 = xindex % 3136 tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4), tmp6, 
xmask) tl.store(out_ptr1 + (x5 + (3200*x3)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/vi/cvii2lvgowi4ilvgcqvcouxsr3dqcg3yuq3ktwlv4tmvp2ll652d.py # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # x_2 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 100) % 32 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ui/cui3s4mqognsjp5q5f2iwenfoqbabemexibxzboo4z6bekyz74i4.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_3 => _low_memory_max_pool2d_with_offsets_1, getitem_3 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = (xindex // 5) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/jn/cjnqv3sgcv5x2iz7ij5zdad6ofabcnonrlksgsxu2ob7n274gz6b.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_5 => relu_2 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/6m/c6m6u2ctjb4r4ra3sizrwezzkzegfp2ombflmfg3dwjfci2pen7h.py # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_6 => relu_3 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset 
+ tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (16, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (32, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (120, 800), (800, 1)) assert_size_stride(primals_7, (120, ), (1, )) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84, ), (1, )) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 28, 28), (12544, 784, 28, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 50176, grid=grid(50176), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 16, 14, 14), (3136, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 14, 14), (3200, 196, 14, 1), torch.int8) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 12544, grid=grid(12544), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 10, 10), (3200, 100, 10, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 12800, grid=grid(12800), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 32, 5, 5), (800, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 32, 5, 5), (800, 25, 5, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 3200, grid=grid(3200), stream=stream0) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf7, (4, 800), (800, 1), 0), reinterpret_tensor(primals_6, (800, 120), (1, 800), 0), out=buf8) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu] triton_poi_fused_relu_4.run(buf9, primals_7, 480, grid=grid(480), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original 
ATen: [] extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu] triton_poi_fused_relu_5.run(buf11, primals_9, 336, grid=grid(336), stream=stream0) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 800), (800, 1), 0), buf9, buf11, primals_10, primals_8, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 16, 5, 5), (400, 25, 5, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((120, 800), (800, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((10, 84), (84, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
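Both fused conv+relu kernels in this record follow the same epilogue pattern: the convolution itself runs through extern_kernels.convolution, and the pointwise kernel then adds the per-channel bias and clamps at zero in place. A minimal standalone sketch of that pattern (not taken from the dump; the kernel name, block size, and launch grid are illustrative):

import torch
import triton
import triton.language as tl

@triton.jit
def bias_relu_kernel(x_ptr, bias_ptr, numel, channel_stride, num_channels,
                     BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < numel
    # Recover the channel from the flat NCHW offset, mirroring
    # `x1 = (xindex // 784) % 16` in the generated kernel.
    ch = offs // channel_stride % num_channels
    x = tl.load(x_ptr + offs, mask=mask)
    b = tl.load(bias_ptr + ch, mask=mask, eviction_policy='evict_last')
    # Fused bias add + ReLU, written back in place.
    tl.store(x_ptr + offs, tl.maximum(x + b, 0.0), mask=mask)

x = torch.randn(4, 16, 28, 28, device='cuda')
bias = torch.randn(16, device='cuda')
ref = torch.relu(x + bias.view(1, -1, 1, 1))
n = x.numel()
bias_relu_kernel[(triton.cdiv(n, 1024),)](x, bias, n, 28 * 28, 16, BLOCK=1024)
assert torch.allclose(x, ref)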
import torch from torch import nn import torch.nn.functional as F class LeNet(nn.Module): def __init__(self): super(LeNet, self).__init__() self.conv1 = nn.Conv2d(3, 16, 5) self.pool1 = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(16, 32, 5) self.pool2 = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(32 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = self.pool1(x) x = F.relu(self.conv2(x)) x = self.pool2(x) x = x.view(-1, 32 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 784 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 12544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = xindex // 14 x4 = xindex x3 = xindex // 3136 x5 = xindex % 3136 tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x1), xmask, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x4, tmp6, xmask) tl.store(out_ptr1 + (x5 + 3200 * x3), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x1 = xindex // 5 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, 
tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 480 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 120 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 336 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 84 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (16, 3, 5, 5), (75, 25, 5, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (32, 16, 5, 5), (400, 25, 5, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (120, 800), (800, 1)) assert_size_stride(primals_7, (120,), (1,)) assert_size_stride(primals_8, (84, 120), (120, 1)) assert_size_stride(primals_9, (84,), (1,)) assert_size_stride(primals_10, (10, 84), (84, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 28, 28), (12544, 784, 28, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(50176)](buf1, primals_2, 50176, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 16, 14, 14), (3136, 196, 14, 1), torch.float32) buf3 = empty_strided_cuda((4, 16, 14, 14), (3200, 196, 14, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(12544)](buf1, buf2, buf3, 12544, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 10, 10), (3200, 100, 10, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(12800)](buf5, primals_5, 12800, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 32, 5, 5), (800, 25, 5, 1), torch.int8) buf7 = empty_strided_cuda((4, 32, 5, 5), (800, 25, 5, 1), torch.float32 ) triton_poi_fused_max_pool2d_with_indices_3[grid(3200)](buf5, buf6, buf7, 3200, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 800), (800, 1), 0), reinterpret_tensor(primals_6, (800, 120), (1, 800), 0), out=buf8) buf9 = buf8 del buf8 
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf11, reinterpret_tensor( primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf12) del primals_11 return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 800), (800, 1), 0), buf9, buf11, primals_10, primals_8, primals_6) class LeNetNew(nn.Module): def __init__(self): super(LeNetNew, self).__init__() self.conv1 = nn.Conv2d(3, 16, 5) self.pool1 = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(16, 32, 5) self.pool2 = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(32 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_10 = self.fc3.weight primals_11 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
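A hypothetical parity check for this record (assumes the LeNet and LeNetNew classes above are importable and a CUDA device is available, since call() pins cuda:0; the tolerance is a guess):

import torch

eager = LeNet().cuda().eval()
compiled = LeNetNew().cuda().eval()
compiled.load_state_dict(eager.state_dict())  # identical parameters in both

x = torch.rand(4, 3, 32, 32, device='cuda')
with torch.no_grad():
    ref = eager(x)
    out = compiled(x)
assert out.shape == (4, 10)
assert torch.allclose(ref, out, atol=1e-5)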
AluminiumOxide/pytorch_base_-tutorial
LeNet
false
1946
[ "Apache-2.0" ]
0
a6d3bea6070c7c774dcd7c55d94b0a1441548c8b
https://github.com/AluminiumOxide/pytorch_base_-tutorial/tree/a6d3bea6070c7c774dcd7c55d94b0a1441548c8b
import torch from torch import nn import torch.nn.functional as F class Model(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(3, 16, 5) self.pool1 = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(16, 32, 5) self.pool2 = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(32 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = self.pool1(x) x = F.relu(self.conv2(x)) x = self.pool2(x) x = x.view(-1, 32 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return []
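The xnumel constants hard-coded in this record's kernels (50176, 12544, 12800, 3200) follow from the standard convolution/pooling output-size formula applied to the (4, 3, 32, 32) input; a short worked check in plain Python:

def conv_out(n, k, stride=1, pad=0):
    # floor((n + 2*pad - k) / stride) + 1, the usual output-size formula
    return (n + 2 * pad - k) // stride + 1

h = conv_out(32, 5)               # conv1, 5x5 stride 1: 28
assert 4 * 16 * h * h == 50176    # triton_poi_fused_convolution_relu_0
h = conv_out(h, 2, stride=2)      # pool1, 2x2 stride 2: 14
assert 4 * 16 * h * h == 12544    # triton_poi_fused_max_pool2d_with_indices_1
h = conv_out(h, 5)                # conv2: 10
assert 4 * 32 * h * h == 12800    # triton_poi_fused_convolution_relu_2
h = conv_out(h, 2, stride=2)      # pool2: 5
assert 4 * 32 * h * h == 3200     # triton_poi_fused_max_pool2d_with_indices_3
assert 32 * h * h == 800          # flattened feature size feeding fc1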
Add
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/3n/c3nsxs3ge5j4meuwmimcituu5g5fkwyjrcs4loqlfmknmkhovm7j.py # Topologically Sorted Source Nodes: [result_1, result_2, result_3], Original ATen: [aten.add] # Source node to ATen node mapping: # result_1 => add # result_2 => add_1 # result_3 => add_2 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, %select_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %select_2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %select_3), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [result_1, result_2, result_3], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from typing import List from torch import nn from typing import Tuple from typing import Union class Add(nn.Module): """Add module for Kindle.""" def __init__(self): """Initialize module.""" super().__init__() @classmethod def forward(cls, x: 'Union[Tuple[torch.Tensor, ...], List[torch.Tensor]]' ) ->torch.Tensor: """Add inputs. Args: x: list of torch tensors Returns: sum of all x's """ result = x[0] for i in range(1, len(x)): result = result + x[i] return result def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp3 = tl.load(in_ptr0 + (128 + x0), xmask) tmp5 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class AddNew(nn.Module): """Add module for Kindle.""" def __init__(self): """Initialize module.""" super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
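For the fixed (4, 4, 4, 4) input, the unrolled loads in triton_poi_fused_add_0 at offsets 0, 64, 128 and 192 collapse the forward loop into a sum over the leading dimension; a quick eager-side check:

import torch

x = torch.rand(4, 4, 4, 4)
result = x[0]
for i in range(1, len(x)):        # the loop from Add.forward
    result = result + x[i]
assert result.shape == (4, 4, 4)  # matches buf0's (4, 4, 4) allocation
assert torch.allclose(result, x.sum(dim=0))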
Anon-Artist/kindle
Add
false
1947
[ "MIT" ]
0
7e62e370e0130e6c61db6cdd339a451d5f1f8985
https://github.com/Anon-Artist/kindle/tree/7e62e370e0130e6c61db6cdd339a451d5f1f8985
import torch from typing import List from torch import nn from typing import Tuple from typing import Union class Model(nn.Module): """Add module for Kindle.""" def __init__(self): """Initialize module.""" super().__init__() @classmethod def forward(cls, x: 'Union[Tuple[torch.Tensor, ...], List[torch.Tensor]]' ) ->torch.Tensor: """Add inputs. Args: x: list of torch tensors Returns: sum of all x's """ result = x[0] for i in range(1, len(x)): result = result + x[i] return result def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return []
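A hypothetical eager-vs-compiled parity check for this record (assumes the Add and AddNew classes above are importable and CUDA is available; Add holds no parameters, so nothing needs copying between the two):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = Add()(x)       # eager loop over x[0..3]
out = AddNew()(x)    # compiled path via call()
assert torch.allclose(ref, out)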
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/zf/czfnaeipqg4a3qzttb2l6zy5ng44vshk3lfmp25jc2er665hxsmw.py # Topologically Sorted Source Nodes: [mean, sub], Original ATen: [aten.mean, aten.sub] # Source node to ATen node mapping: # mean => mean # sub => sub # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/ix/cix3agfx5ttdzebqllvt7xsyf7hguiv4c5ya7rpk6x57inkbm4xh.py # Topologically Sorted Source Nodes: [pow_1, variance, add, rsqrt, x, mul_1, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul] # Source node to ATen node mapping: # add => add # mul_1 => mul_1 # pow_1 => pow_1 # rsqrt => rsqrt # variance => mean_1 # x => mul # x_1 => add_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 0.0001), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %view), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %view_1), kwargs = {}) triton_poi_fused_add_mean_mul_pow_rsqrt_1 = async_compile.triton('triton_poi_fused_add_mean_mul_pow_rsqrt_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_pow_rsqrt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) x1 = (xindex // 16) % 4 tmp0 = 
tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = libdevice.rsqrt(tmp15) tmp17 = tmp0 * tmp16 tmp19 = tmp17 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + (x3), tmp21, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, sub], Original ATen: [aten.mean, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_mean_sub_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, variance, add, rsqrt, x, mul_1, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul] triton_poi_fused_add_mean_mul_pow_rsqrt_1.run(buf0, primals_2, primals_3, buf1, 256, grid=grid(256), stream=stream0) del buf0 del primals_2 del primals_3 return (buf1, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.utils.data class LayerNorm(nn.Module): def __init__(self, channels, eps=0.0001): super().__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def forward(self, x): n_dims = len(x.shape) mean = torch.mean(x, 1, keepdim=True) variance = torch.mean((x - mean) ** 2, 1, keepdim=True) x = (x - mean) * torch.rsqrt(variance + self.eps) shape = [1, -1] + [1] * (n_dims - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_add_mean_mul_pow_rsqrt_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp18 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 0.0001 tmp15 = tmp13 + tmp14 tmp16 = libdevice.rsqrt(tmp15) tmp17 = tmp0 * tmp16 tmp19 = tmp17 * tmp18 tmp21 = tmp19 + tmp20 tl.store(out_ptr0 + x3, tmp21, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_sub_0[grid(256)](primals_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mean_mul_pow_rsqrt_1[grid(256)](buf0, primals_2, primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 del primals_3 return buf1, primals_1 class LayerNormNew(nn.Module): def __init__(self, channels, eps=0.0001): super().__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
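Note that both fused kernels normalize over dim=1 (channels), not over trailing dimensions as torch.nn.LayerNorm would. A quick check of the eager formula against the module (assumes the LayerNorm class defined in this record is in scope):

import torch

x = torch.rand(4, 4, 4, 4)
gamma, beta, eps = torch.ones(4), torch.zeros(4), 0.0001
mean = x.mean(1, keepdim=True)
variance = ((x - mean) ** 2).mean(1, keepdim=True)
ref = (x - mean) * torch.rsqrt(variance + eps)
ref = ref * gamma.view(1, -1, 1, 1) + beta.view(1, -1, 1, 1)

ln = LayerNorm(4)     # freshly initialized: gamma = ones, beta = zeros
assert torch.allclose(ln(x), ref)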
AndreHe02/glow-tts
LayerNorm
false
1948
[ "MIT" ]
0
683f68f17790f2f46c23e9d3eadbcac352d82e2b
https://github.com/AndreHe02/glow-tts/tree/683f68f17790f2f46c23e9d3eadbcac352d82e2b
import torch from torch import nn import torch.utils.data class Model(nn.Module): def __init__(self, channels, eps=0.0001): super().__init__() self.channels = channels self.eps = eps self.gamma = nn.Parameter(torch.ones(channels)) self.beta = nn.Parameter(torch.zeros(channels)) def forward(self, x): n_dims = len(x.shape) mean = torch.mean(x, 1, keepdim=True) variance = torch.mean((x - mean) ** 2, 1, keepdim=True) x = (x - mean) * torch.rsqrt(variance + self.eps) shape = [1, -1] + [1] * (n_dims - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [4]
ReviewClassifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/f3/cf3wjo3codglmel3mdjaodbq3s3viwdoc74iaz5e3kntwsnjtjqi.py # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # out_1 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_sigmoid_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class ReviewClassifier(nn.Module):

    def __init__(self, n_feature):
        super(ReviewClassifier, self).__init__()
        self.lf = nn.Linear(n_feature, 1, dtype=torch.float32)

    def forward(self, x):
        out = self.lf(x)
        out = F.sigmoid(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_feature': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(64)](buf1, primals_2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_2
    return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1


class ReviewClassifierNew(nn.Module):

    def __init__(self, n_feature):
        super(ReviewClassifierNew, self).__init__()
        self.lf = nn.Linear(n_feature, 1, dtype=torch.float32)

    def forward(self, input_0):
        primals_1 = self.lf.weight
        primals_2 = self.lf.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
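As a sanity check, the fused bias-add + sigmoid kernel above can also be launched directly on a flat buffer. This is a minimal sketch, assuming a CUDA device and that `triton_poi_fused_sigmoid_0` from the module above is importable; the result should match an eager `torch.sigmoid(x + bias)`.

import torch

# Hypothetical smoke test for the fused kernel above (not part of the
# generated module). The kernel hardcodes xnumel = 64 internally.
x = torch.randn(64, device='cuda')
bias = torch.randn(1, device='cuda')
expected = torch.sigmoid(x + bias)

# A single program instance with XBLOCK=64 covers all 64 elements.
triton_poi_fused_sigmoid_0[(1,)](x, bias, 64, XBLOCK=64)

# The kernel writes its result in place into the first argument.
assert torch.allclose(x, expected, atol=1e-6)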
AnissHal/tal
ReviewClassifier
false
1,949
[ "MIT" ]
0
6e96ffa367be6da54383ae9e6b0f56f7b5cf9a92
https://github.com/AnissHal/tal/tree/6e96ffa367be6da54383ae9e6b0f56f7b5cf9a92
import torch
import torch.nn.functional as F
import torch.nn as nn


class Model(nn.Module):

    def __init__(self, n_feature):
        super().__init__()
        self.lf = nn.Linear(n_feature, 1, dtype=torch.float32)

    def forward(self, x):
        out = self.lf(x)
        out = F.sigmoid(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [4]
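To see how the generated module is meant to be used, here is a minimal sketch, assuming a CUDA device and that both `ReviewClassifier` and `ReviewClassifierNew` from the fields above are importable: it checks the Triton-backed module against the eager one on shared weights.

import torch

torch.manual_seed(0)
eager = ReviewClassifier(n_feature=4).cuda()
compiled = ReviewClassifierNew(n_feature=4).cuda()
# Share parameters so both modules compute the same function.
compiled.load_state_dict(eager.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    out_eager = eager(x)      # shape (4, 4, 4, 1)
    out_triton = compiled(x)  # same shape, produced by call()
assert torch.allclose(out_eager, out_triton, atol=1e-6)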
RegLoss
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/3l/c3lzkatm5eqqfbbtcqxxw7oujhlvnqrxq6gcyetj3kemmpsltiiy.py
# Topologically Sorted Source Nodes: [reg_loss, norm_1, reg_loss_1, norm_2, reg_loss_2, norm_3, reg_loss_3], Original ATen: [aten.linalg_vector_norm, aten.add]
# Source node to ATen node mapping:
#   norm_1 => pow_3, pow_4, sum_2
#   norm_2 => pow_5, pow_6, sum_3
#   norm_3 => pow_7, pow_8, sum_4
#   reg_loss => pow_1, pow_2, sum_1
#   reg_loss_1 => add
#   reg_loss_2 => add_1
#   reg_loss_3 => add_2
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
#   %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, None), kwargs = {})
#   %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, %pow_4), kwargs = {})
#   %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_2, 2), kwargs = {})
#   %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, None), kwargs = {})
#   %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %pow_6), kwargs = {})
#   %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_3, 2), kwargs = {})
#   %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, None), kwargs = {})
#   %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_4, 0.5), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %pow_8), kwargs = {})
triton_per_fused_add_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 64],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 64
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp5 = tl.load(in_ptr0 + (64 + r0), None)
    tmp10 = tl.load(in_ptr0 + (128 + r0), None)
    tmp15 = tl.load(in_ptr0 + (192 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tmp6 = tmp5 * tmp5
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp11 = tmp10 * tmp10
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.sum(tmp12, 1)[:, None]
    tmp16 = tmp15 * tmp15
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
    tmp19 = tl.sum(tmp17, 1)[:, None]
    tmp20 = libdevice.sqrt(tmp4)
    tmp21 = libdevice.sqrt(tmp9)
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.sqrt(tmp14)
    tmp24 = tmp22 + tmp23
    tmp25 = libdevice.sqrt(tmp19)
    tmp26 = tmp24 + tmp25
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [reg_loss, norm_1, reg_loss_1, norm_2, reg_loss_2, norm_3, reg_loss_3], Original ATen: [aten.linalg_vector_norm, aten.add]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_linalg_vector_norm_0.run(buf4, arg0_1, 1, 64, grid=grid(1), stream=stream0)
        del arg0_1
    return (buf4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class RegLoss(nn.Module):
    """ RegLoss, L2 regularization on model parameters """

    def __init__(self):
        super(RegLoss, self).__init__()

    def forward(self, parameters):
        reg_loss = None
        for W in parameters:
            if reg_loss is None:
                reg_loss = W.norm(2)
            else:
                reg_loss = reg_loss + W.norm(2)
        return reg_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp5 = tl.load(in_ptr0 + (64 + r0), None)
    tmp10 = tl.load(in_ptr0 + (128 + r0), None)
    tmp15 = tl.load(in_ptr0 + (192 + r0), None)
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.sum(tmp2, 1)[:, None]
    tmp6 = tmp5 * tmp5
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.sum(tmp7, 1)[:, None]
    tmp11 = tmp10 * tmp10
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.sum(tmp12, 1)[:, None]
    tmp16 = tmp15 * tmp15
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
    tmp19 = tl.sum(tmp17, 1)[:, None]
    tmp20 = libdevice.sqrt(tmp4)
    tmp21 = libdevice.sqrt(tmp9)
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.sqrt(tmp14)
    tmp24 = tmp22 + tmp23
    tmp25 = libdevice.sqrt(tmp19)
    tmp26 = tmp24 + tmp25
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_linalg_vector_norm_0[grid(1)](buf4, arg0_1, 1,
            64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf4,


class RegLossNew(nn.Module):
    """ RegLoss, L2 regularization on model parameters """

    def __init__(self):
        super(RegLossNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Ahren09/RecBole
RegLoss
false
1,950
[ "MIT" ]
0
b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
https://github.com/Ahren09/RecBole/tree/b3921818dfbc1b81f9eda8d5e9f05bc9d9114089
import torch
import torch.nn as nn


class Model(nn.Module):
    """ RegLoss, L2 regularization on model parameters """

    def __init__(self):
        super().__init__()

    def forward(self, parameters):
        reg_loss = None
        for W in parameters:
            if reg_loss is None:
                reg_loss = W.norm(2)
            else:
                reg_loss = reg_loss + W.norm(2)
        return reg_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return []
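For context, a short sketch of how this loss is typically applied during training, using the eager `RegLoss` from the reference code above (the model and the weighting coefficient here are hypothetical; the fused kernel path above only covers the benchmarked single-tensor case).

import torch
import torch.nn as nn

model = nn.Linear(8, 2)
reg_loss_fn = RegLoss()

x = torch.randn(16, 8)
target = torch.randn(16, 2)
mse = nn.functional.mse_loss(model(x), target)
# Add an L2 penalty over all parameters, scaled by a small coefficient.
loss = mse + 1e-4 * reg_loss_fn(model.parameters())
loss.backward()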
AttentionUnit
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/4u/c4uzfozwsgh3le5rfytdcr7fatpzxslwn6zrlgczuib4kxua4g25.py
# Topologically Sorted Source Nodes: [add, sumTanh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
#   add => add
#   sumTanh => tanh
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %view_1), kwargs = {})
#   %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = (xindex // 16)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = libdevice.tanh(tmp6)
    tl.store(in_out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   alpha => amax, exp, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py
# Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   alpha => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4, ), (1, ))
    assert_size_stride(primals_7, (1, 4), (4, 1))
    assert_size_stride(primals_8, (1, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
        del primals_5
        buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [add, sumTanh], Original ATen: [aten.add, aten.tanh]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_tanh_0.run(buf2, buf1, primals_6, primals_3, 64, grid=grid(64), stream=stream0)
        del primals_3
        del primals_6
        buf4 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0); del buf1  # reuse
        # Topologically Sorted Source Nodes: [vProj], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
        del primals_8
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_1.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
        buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4  # reuse
        # Topologically Sorted Source Nodes: [alpha], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_2.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
        del buf5
    return (buf6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_4, buf2, buf6, primals_7, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init


class AttentionUnit(nn.Module):

    def __init__(self, sDim, xDim, attDim):
        """
        sDim, xDim -> attDim -> 1
        Params:
        - sDim: hidden layer dim of the decoder
        - xDim: output layer dim of the encoder
        - attDim: dim of the attention unit
        """
        super(AttentionUnit, self).__init__()
        self.sDim = sDim
        self.xDim = xDim
        self.attDim = attDim
        self.sEmbed = nn.Linear(sDim, attDim)
        self.xEmbed = nn.Linear(xDim, attDim)
        self.wEmbed = nn.Linear(attDim, 1)

    def init_weights(self):
        init.normal_(self.sEmbed.weight, std=0.01)
        init.constant_(self.sEmbed.bias, 0)
        init.normal_(self.xEmbed.weight, std=0.01)
        init.constant_(self.xEmbed.bias, 0)
        init.normal_(self.wEmbed.weight, std=0.01)
        init.constant_(self.wEmbed.bias, 0)

    def forward(self, x, sPrev):
        """
        Params:
        - x: all hidden states of the encoder, shape=[batch_size, time_step, encoder_dim]
        - sPrev: h(t-1) of the decoder
        """
        batch_size, T, _ = x.size()
        x = x.view(-1, self.xDim)
        xProj = self.xEmbed(x)
        xProj = xProj.view(batch_size, T, -1)
        sPrev = sPrev.squeeze(0)
        sProj = self.sEmbed(sPrev)
        sProj = torch.unsqueeze(sProj, 1)
        sProj = sProj.expand(batch_size, T, self.attDim)
        sumTanh = torch.tanh(sProj + xProj)
        sumTanh = sumTanh.view(-1, self.attDim)
        vProj = self.wEmbed(sumTanh)
        vProj = vProj.view(batch_size, T)
        alpha = F.softmax(vProj, dim=1)
        return alpha


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'sDim': 4, 'xDim': 4, 'attDim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
from torch.nn import init

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_out_ptr0 + x3, xmask)
    tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = libdevice.tanh(tmp6)
    tl.store(in_out_ptr0 + x3, tmp7, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (1, 4), (4, 1))
    assert_size_stride(primals_8, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_4, reinterpret_tensor(primals_5, (4, 4),
            (1, 4), 0), out=buf1)
        del primals_5
        buf2 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_tanh_0[grid(64)](buf2, buf1, primals_6,
            primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
        del primals_6
        buf4 = reinterpret_tensor(buf1, (16, 1), (1, 1), 0)
        del buf1
        extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0),
            alpha=1, beta=1, out=buf4)
        del primals_8
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0)
        del buf4
        triton_poi_fused__softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf5
    return (buf6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
        primals_4, buf2, buf6, primals_7)


class AttentionUnitNew(nn.Module):

    def __init__(self, sDim, xDim, attDim):
        """
        sDim, xDim -> attDim -> 1
        Params:
        - sDim: hidden layer dim of the decoder
        - xDim: output layer dim of the encoder
        - attDim: dim of the attention unit
        """
        super(AttentionUnitNew, self).__init__()
        self.sDim = sDim
        self.xDim = xDim
        self.attDim = attDim
        self.sEmbed = nn.Linear(sDim, attDim)
        self.xEmbed = nn.Linear(xDim, attDim)
        self.wEmbed = nn.Linear(attDim, 1)

    def init_weights(self):
        init.normal_(self.sEmbed.weight, std=0.01)
        init.constant_(self.sEmbed.bias, 0)
        init.normal_(self.xEmbed.weight, std=0.01)
        init.constant_(self.xEmbed.bias, 0)
        init.normal_(self.wEmbed.weight, std=0.01)
        init.constant_(self.wEmbed.bias, 0)

    def forward(self, input_0, input_1):
        primals_2 = self.sEmbed.weight
        primals_3 = self.sEmbed.bias
        primals_4 = self.xEmbed.weight
        primals_6 = self.xEmbed.bias
        primals_7 = self.wEmbed.weight
        primals_8 = self.wEmbed.bias
        primals_1 = input_0
        primals_5 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
Annihilation7/aster
AttentionUnit
false
1,951
[ "MIT" ]
0
eab6946eb1f99e395abc56c3446cd05caa90e791
https://github.com/Annihilation7/aster/tree/eab6946eb1f99e395abc56c3446cd05caa90e791
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init


class Model(nn.Module):

    def __init__(self, sDim, xDim, attDim):
        """
        sDim, xDim -> attDim -> 1
        Params:
        - sDim: hidden layer dim of the decoder
        - xDim: output layer dim of the encoder
        - attDim: dim of the attention unit
        """
        super().__init__()
        self.sDim = sDim
        self.xDim = xDim
        self.attDim = attDim
        self.sEmbed = nn.Linear(sDim, attDim)
        self.xEmbed = nn.Linear(xDim, attDim)
        self.wEmbed = nn.Linear(attDim, 1)

    def init_weights(self):
        init.normal_(self.sEmbed.weight, std=0.01)
        init.constant_(self.sEmbed.bias, 0)
        init.normal_(self.xEmbed.weight, std=0.01)
        init.constant_(self.xEmbed.bias, 0)
        init.normal_(self.wEmbed.weight, std=0.01)
        init.constant_(self.wEmbed.bias, 0)

    def forward(self, x, sPrev):
        """
        Params:
        - x: all hidden states of the encoder, shape=[batch_size, time_step, encoder_dim]
        - sPrev: h(t-1) of the decoder
        """
        batch_size, T, _ = x.size()
        x = x.view(-1, self.xDim)
        xProj = self.xEmbed(x)
        xProj = xProj.view(batch_size, T, -1)
        sPrev = sPrev.squeeze(0)
        sProj = self.sEmbed(sPrev)
        sProj = torch.unsqueeze(sProj, 1)
        sProj = sProj.expand(batch_size, T, self.attDim)
        sumTanh = torch.tanh(sProj + xProj)
        sumTanh = sumTanh.view(-1, self.attDim)
        vProj = self.wEmbed(sumTanh)
        vProj = vProj.view(batch_size, T)
        alpha = F.softmax(vProj, dim=1)
        return alpha


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [4, 4, 4]
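A minimal usage sketch of the eager `AttentionUnit` from the reference code above (CPU, shapes taken from get_inputs): since the returned `alpha` is a softmax over the `T` encoder steps, each row should sum to 1.

import torch

att = AttentionUnit(sDim=4, xDim=4, attDim=4)
att.init_weights()

x = torch.rand(4, 4, 4)    # [batch_size, T, encoder_dim]
s_prev = torch.rand(4, 4)  # decoder hidden state h(t-1)
alpha = att(x, s_prev)     # [batch_size, T]
assert alpha.shape == (4, 4)
assert torch.allclose(alpha.sum(dim=1), torch.ones(4), atol=1e-6)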
LossAttentionLayer
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_7/inductor_cache/rm/crmrreocq4mcn3gdi2z6jtfshrtqjadzuvqp3i3ifd4buyz2b7dn.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
#   max_1 => max_1
# Graph fragment:
#   %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%addmm,), kwargs = {})
triton_per_fused_max_0 = async_compile.triton('triton_per_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 16],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
    tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/t4/ct4jf6radjm3uzbb4e7ron4q2zhxuslsrukzym4htoik2yt7gdd4.py
# Topologically Sorted Source Nodes: [out, out_1, out_2, sum_2, alpha], Original ATen: [aten.sub, aten.exp, aten.sum, aten.div]
# Source node to ATen node mapping:
#   alpha => div
#   out => sub
#   out_1 => exp
#   out_2 => sum_1
#   sum_2 => sum_2
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm, %max_1), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
#   %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [0]), kwargs = {})
#   %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_div_exp_sub_sum_1 = async_compile.triton('triton_per_fused_div_exp_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 4],
    reduction_hint=ReductionHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_exp_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_exp_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 4
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
    tmp3 = tmp0 - tmp2
    tmp4 = tl_math.exp(tmp3)
    tmp6 = tmp5 - tmp2
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp4 + tmp7
    tmp10 = tmp9 - tmp2
    tmp11 = tl_math.exp(tmp10)
    tmp12 = tmp8 + tmp11
    tmp14 = tmp13 - tmp2
    tmp15 = tl_math.exp(tmp14)
    tmp16 = tmp12 + tmp15
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
    tmp19 = tl.sum(tmp17, 1)[:, None]
    tmp20 = tmp16 / tmp19
    tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp16, None)
    tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp20, None)
    tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')


# kernel path: runs/run_shard_7/inductor_cache/uy/cuyss45hwj42bac2sjfffjg6eowguip5yiuiltwfw3j2zoocdwsw.py
# Topologically Sorted Source Nodes: [alpha01, context], Original ATen: [aten.mul]
# Source node to ATen node mapping:
#   alpha01 => mul
#   context => mul_1
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, 4), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %mul), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + (0))
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp4 = tmp1 / tmp3
    tmp5 = 4.0
    tmp6 = tmp4 * tmp5
    tmp7 = tmp0 * tmp6
    tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    assert_size_stride(arg2_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out_c], Original ATen: [aten.addmm]
        extern_kernels.addmm(arg0_1, arg2_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
        stream0 = get_raw_stream(0)
        triton_per_fused_max_0.run(buf0, buf1, 1, 16, grid=grid(1), stream=stream0)
        buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf3 = empty_strided_cuda((1, ), (1, ), torch.float32)
        buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        # Topologically Sorted Source Nodes: [out, out_1, out_2, sum_2, alpha], Original ATen: [aten.sub, aten.exp, aten.sum, aten.div]
        triton_per_fused_div_exp_sub_sum_1.run(buf0, buf1, buf2, buf3, buf5, 1, 4, grid=grid(1), stream=stream0)
        del buf1
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [alpha01, context], Original ATen: [aten.mul]
        triton_poi_fused_mul_2.run(arg2_1, buf2, buf3, buf4, 16, grid=grid(16), stream=stream0)
        del arg2_1
        del buf2
        del buf3
    return (buf4, buf0, reinterpret_tensor(buf5, (4, ), (1, ), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1, arg2_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class LossAttentionLayer(nn.Module):

    def __init__(self):
        super(LossAttentionLayer, self).__init__()

    def forward(self, features, W_1, b_1):
        out_c = F.linear(features, W_1, b_1)
        out = out_c - out_c.max()
        out = out.exp()
        out = out.sum(1, keepdim=True)
        alpha = out / out.sum(0)
        alpha01 = features.size(0) * alpha.expand_as(features)
        context = torch.mul(features, alpha01)
        return context, out_c, torch.squeeze(alpha)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
    tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)


@triton.jit
def triton_per_fused_div_exp_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
    out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp5 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp3 = tmp0 - tmp2
    tmp4 = tl_math.exp(tmp3)
    tmp6 = tmp5 - tmp2
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp4 + tmp7
    tmp10 = tmp9 - tmp2
    tmp11 = tl_math.exp(tmp10)
    tmp12 = tmp8 + tmp11
    tmp14 = tmp13 - tmp2
    tmp15 = tl_math.exp(tmp14)
    tmp16 = tmp12 + tmp15
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
    tmp19 = tl.sum(tmp17, 1)[:, None]
    tmp20 = tmp16 / tmp19
    tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp16, None)
    tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp20, None)
    tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)


@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr2 + 0)
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
    tmp4 = tmp1 / tmp3
    tmp5 = 4.0
    tmp6 = tmp4 * tmp5
    tmp7 = tmp0 * tmp6
    tl.store(out_ptr0 + x2, tmp7, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    assert_size_stride(arg2_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(arg0_1, arg2_1, reinterpret_tensor(arg1_1, (4,
            4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_max_0[grid(1)](buf0, buf1, 1, 16, XBLOCK=1,
            num_warps=2, num_stages=1)
        buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf3 = empty_strided_cuda((1,), (1,), torch.float32)
        buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        triton_per_fused_div_exp_sub_sum_1[grid(1)](buf0, buf1, buf2, buf3,
            buf5, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf1
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_mul_2[grid(16)](arg2_1, buf2, buf3, buf4, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del arg2_1
        del buf2
        del buf3
    return buf4, buf0, reinterpret_tensor(buf5, (4,), (1,), 0)


class LossAttentionLayerNew(nn.Module):

    def __init__(self):
        super(LossAttentionLayerNew, self).__init__()

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0], output[1], output[2]
AnetaKaczynska/ProtoPNet
LossAttentionLayer
false
1,952
[ "MIT" ]
0
7de2aa57833586ccfd8e63dc835c8cc9db727a2f
https://github.com/AnetaKaczynska/ProtoPNet/tree/7de2aa57833586ccfd8e63dc835c8cc9db727a2f
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class Model(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, features, W_1, b_1):
        out_c = F.linear(features, W_1, b_1)
        out = out_c - out_c.max()
        out = out.exp()
        out = out.sum(1, keepdim=True)
        alpha = out / out.sum(0)
        alpha01 = features.size(0) * alpha.expand_as(features)
        context = torch.mul(features, alpha01)
        return context, out_c, torch.squeeze(alpha)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return []
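Finally, a minimal sketch of calling the eager `LossAttentionLayer` above on CPU. It applies `F.linear(features, W_1, b_1)`, turns the row-wise exp-sums into weights `alpha` that sum to 1 over the batch, and rescales the features by `batch_size * alpha`. Note that the conventional bias shape `(4,)` is used here as an assumption; the dataset's own get_inputs passes a `(4, 4)` tensor for `b_1`, which F.linear also accepts via broadcasting.

import torch

layer = LossAttentionLayer()
features = torch.rand(4, 4)
W_1 = torch.rand(4, 4)  # weight of a linear "loss head"
b_1 = torch.rand(4)     # its bias (assumed 1-D here)
context, out_c, alpha = layer(features, W_1, b_1)
assert context.shape == features.shape
assert out_c.shape == (4, 4)
# alpha is a per-sample weight vector that sums to 1 over the batch.
assert torch.allclose(alpha.sum(), torch.tensor(1.0), atol=1e-6)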